// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * This code builds two trees of free cluster extents.
 * The trees are sorted by the start of the extent and by the length of the extent.
 * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in the trees.
 * In extreme cases the code reads the on-disk bitmap to find free clusters.
 *
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * Maximum number of extents in the tree.
 */
#define NTFS_MAX_WND_EXTENTS (32u * 1024u)

struct rb_node_key {
	struct rb_node node;
	size_t key;
};

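/*
 * Each free fragment is described by a single e_node that is linked into
 * both trees at once: lookups by position go through 'start', lookups by
 * size go through 'count'. For example (illustrative), a free run of 8
 * clusters starting at cluster 100 is one e_node with start.key == 100
 * and count.key == 8, and it must be erased from both trees together.
 */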
struct e_node {
	struct rb_node_key start; /* Tree sorted by start. */
	struct rb_node_key count; /* Tree sorted by len. */
};

static int wnd_rescan(struct wnd_bitmap *wnd);
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);

static struct kmem_cache *ntfs_enode_cachep;

int __init ntfs3_init_bitmap(void)
{
	ntfs_enode_cachep =
		kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
				  SLAB_RECLAIM_ACCOUNT, NULL);
	return ntfs_enode_cachep ? 0 : -ENOMEM;
}

void ntfs3_exit_bitmap(void)
{
	kmem_cache_destroy(ntfs_enode_cachep);
}

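/*
 * wnd_bits - Return the number of valid bits in window 'i'.
 *
 * Only the last window can be partially filled.
 */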
static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
{
	return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
}

/*
 * wnd_scan
 *
 * b_pos + b_len - the biggest free fragment found so far.
 * Scan the range [wpos, wend) of window @buf.
 * '*prev_tail' carries the number of trailing free bits from the previous
 * window, so runs that span window boundaries are counted in full.
 *
 * Return: -1 if not found.
 */
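/*
 * Example (illustrative): with to_alloc == 4 and *prev_tail == 2 left
 * over from the previous window, two leading free bits in this window
 * complete the run and wnd_scan() returns 'wbit - 2', the start of the
 * combined 4-bit run.
 */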
static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
		       size_t to_alloc, size_t *prev_tail, size_t *b_pos,
		       size_t *b_len)
{
	while (wpos < wend) {
		size_t free_len;
		u32 free_bits, end;
		u32 used = find_next_zero_bit(buf, wend, wpos);

		if (used >= wend) {
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
			return -1;
		}

		if (used > wpos) {
			wpos = used;
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
		}

		/*
		 * Now we have a fragment [wpos, wend) starting with 0.
		 */
		end = wpos + to_alloc - *prev_tail;
		free_bits = find_next_bit(buf, min(end, wend), wpos);

		free_len = *prev_tail + free_bits - wpos;

		if (*b_len < free_len) {
			*b_pos = wbit + wpos - *prev_tail;
			*b_len = free_len;
		}

		if (free_len >= to_alloc)
			return wbit + wpos - *prev_tail;

		if (free_bits >= wend) {
			*prev_tail += free_bits - wpos;
			return -1;
		}

		wpos = free_bits + 1;

		*prev_tail = 0;
	}

	return -1;
}

/*
 * wnd_close - Frees all resources.
 */
void wnd_close(struct wnd_bitmap *wnd)
{
	struct rb_node *node, *next;

	kfree(wnd->free_bits);
	run_close(&wnd->run);

	node = rb_first(&wnd->start_tree);

	while (node) {
		next = rb_next(node);
		rb_erase(node, &wnd->start_tree);
		kmem_cache_free(ntfs_enode_cachep,
				rb_entry(node, struct e_node, start.node));
		node = next;
	}
}

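/*
 * rb_lookup - Find the rightmost node whose key is not greater than @v.
 *
 * Return: The node with key == @v if present, otherwise the node with
 * the largest key < @v, or NULL if every key is greater than @v.
 */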
static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *r = NULL;

	while (*p) {
		struct rb_node_key *k;

		k = rb_entry(*p, struct rb_node_key, node);
		if (v < k->key) {
			p = &(*p)->rb_left;
		} else if (v > k->key) {
			r = &k->node;
			p = &(*p)->rb_right;
		} else {
			return &k->node;
		}
	}

	return r;
}

/*
 * rb_insert_count - Helper function to insert into the special 'count' tree.
 *
 * The tree is ordered by descending extent length; equal lengths are
 * ordered by ascending start, so rb_first() is always the biggest extent.
 */
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_ckey = e->count.key;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k =
			rb_entry(parent = *p, struct e_node, count.node);

		if (e_ckey > k->count.key) {
			p = &(*p)->rb_left;
		} else if (e_ckey < k->count.key) {
			p = &(*p)->rb_right;
		} else if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->count.node, parent, p);
	rb_insert_color(&e->count.node, root);
	return true;
}

/*
 * rb_insert_start - Helper function to insert into the 'start' tree.
 */
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k;

		parent = *p;

		k = rb_entry(parent, struct e_node, start.node);
		if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->start.node, parent, p);
	rb_insert_color(&e->start.node, root);
	return true;
}

/*
 * wnd_add_free_ext - Add a new extent of free space.
 * @build: 1 when building the tree (initial rescan), 0 at run time.
 */
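/*
 * Example (illustrative): if the cache holds [10, 20) and [25, 30) and
 * bits [20, 25) are freed, the three pieces are merged into a single
 * extent [10, 30) that replaces both nodes.
 */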
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
			     bool build)
{
	struct e_node *e, *e0 = NULL;
	size_t ib, end_in = bit + len;
	struct rb_node *n;

	if (build) {
		/* Use extent_min to filter out too short extents. */
		if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
		    len <= wnd->extent_min) {
			wnd->uptodated = -1;
			return;
		}
	} else {
		/* Try to find an extent before 'bit'. */
		n = rb_lookup(&wnd->start_tree, bit);

		if (!n) {
			n = rb_first(&wnd->start_tree);
		} else {
			e = rb_entry(n, struct e_node, start.node);
			n = rb_next(n);
			if (e->start.key + e->count.key == bit) {
				/* Merge with the left neighbour. */
				bit = e->start.key;
				len += e->count.key;
				rb_erase(&e->start.node, &wnd->start_tree);
				rb_erase(&e->count.node, &wnd->count_tree);
				wnd->count -= 1;
				e0 = e;
			}
		}

		while (n) {
			size_t next_end;

			e = rb_entry(n, struct e_node, start.node);
			next_end = e->start.key + e->count.key;
			if (e->start.key > end_in)
				break;

			/* Merge with the right neighbour. */
			n = rb_next(n);
			len += next_end - end_in;
			end_in = next_end;
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;

			if (!e0)
				e0 = e;
			else
				kmem_cache_free(ntfs_enode_cachep, e);
		}

		if (wnd->uptodated != 1) {
			/* Check bits before 'bit'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     bit < wnd->zone_end
				     ? 0
				     : wnd->zone_end;

			while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
				bit -= 1;
				len += 1;
			}

			/* Check bits after 'end_in'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     end_in > wnd->zone_bit
				     ? wnd->nbits
				     : wnd->zone_bit;

			while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
				end_in += 1;
				len += 1;
			}
		}
	}
	/* Insert new fragment. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		if (e0)
			kmem_cache_free(ntfs_enode_cachep, e0);

		wnd->uptodated = -1;

		/* Compare with the smallest fragment. */
		n = rb_last(&wnd->count_tree);
		e = rb_entry(n, struct e_node, count.node);
		if (len <= e->count.key)
			goto out; /* Do not insert small fragments. */

		if (build) {
			struct e_node *e2;

			n = rb_prev(n);
			e2 = rb_entry(n, struct e_node, count.node);
			/* The smallest fragment will be 'e2->count.key'. */
			wnd->extent_min = e2->count.key;
		}

		/* Replace the smallest fragment by the new one. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e) {
			wnd->uptodated = -1;
			goto out;
		}

		if (build && len <= wnd->extent_min)
			wnd->extent_min = len;
	}
	e->start.key = bit;
	e->count.key = len;
	if (len > wnd->extent_max)
		wnd->extent_max = len;

	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	wnd->count += 1;

out:;
}

/*
 * wnd_remove_free_ext - Remove a run from the cached free space.
 */
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
	struct rb_node *n, *n3;
	struct e_node *e, *e3;
	size_t end_in = bit + len;
	size_t end3, end, new_key, new_len, max_new_len;

	/* Try to find an extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);

	if (!n)
		return;

	e = rb_entry(n, struct e_node, start.node);
	end = e->start.key + e->count.key;

	new_key = new_len = 0;
	len = e->count.key;

	/* Range [bit, end_in) must be inside 'e' or outside 'e' and 'n'. */
	if (e->start.key > bit)
		;
	else if (end_in <= end) {
		/* Range [bit, end_in) is inside 'e'. */
		new_key = end_in;
		new_len = end - end_in;
		len = bit - e->start.key;
	} else if (bit > end) {
		bool bmax = false;

		n3 = rb_next(n);

		while (n3) {
			e3 = rb_entry(n3, struct e_node, start.node);
			if (e3->start.key >= end_in)
				break;

			if (e3->count.key == wnd->extent_max)
				bmax = true;

			end3 = e3->start.key + e3->count.key;
			if (end3 > end_in) {
				e3->start.key = end_in;
				rb_erase(&e3->count.node, &wnd->count_tree);
				e3->count.key = end3 - end_in;
				rb_insert_count(&wnd->count_tree, e3);
				break;
			}

			n3 = rb_next(n3);
			rb_erase(&e3->start.node, &wnd->start_tree);
			rb_erase(&e3->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e3);
		}
		if (!bmax)
			return;
		n3 = rb_first(&wnd->count_tree);
		wnd->extent_max =
			n3 ? rb_entry(n3, struct e_node, count.node)->count.key
			   : 0;
		return;
	}

	if (e->count.key != wnd->extent_max) {
		;
	} else if (rb_prev(&e->count.node)) {
		;
	} else {
		n3 = rb_next(&e->count.node);
		max_new_len = max(len, new_len);
		if (!n3) {
			wnd->extent_max = max_new_len;
		} else {
			e3 = rb_entry(n3, struct e_node, count.node);
			wnd->extent_max = max(e3->count.key, max_new_len);
		}
	}

	if (!len) {
		if (new_len) {
			e->start.key = new_key;
			rb_erase(&e->count.node, &wnd->count_tree);
			e->count.key = new_len;
			rb_insert_count(&wnd->count_tree, e);
		} else {
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e);
		}
		goto out;
	}
	rb_erase(&e->count.node, &wnd->count_tree);
	e->count.key = len;
	rb_insert_count(&wnd->count_tree, e);

	if (!new_len)
		goto out;

	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		wnd->uptodated = -1;

		/* Get the minimal extent. */
		e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
			     count.node);
		if (e->count.key > new_len)
			goto out;

		/* Replace the minimum. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e)
			wnd->uptodated = -1;
	}

	if (e) {
		e->start.key = new_key;
		e->count.key = new_len;
		rb_insert_start(&wnd->start_tree, e);
		rb_insert_count(&wnd->count_tree, e);
		wnd->count += 1;
	}

out:
	if (!wnd->count && 1 != wnd->uptodated)
		wnd_rescan(wnd);
}

/*
 * wnd_rescan - Scan the whole bitmap. Used during initialization.
 */
static int wnd_rescan(struct wnd_bitmap *wnd)
{
	int err = 0;
	size_t prev_tail = 0;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u64 lbo, len = 0;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 wbits = 8 * sb->s_blocksize;
	u32 used, frb;
	const ulong *buf;
	size_t wpos, wbit, iw, vbo;
	struct buffer_head *bh = NULL;
	CLST lcn, clen;

	wnd->uptodated = 0;
	wnd->extent_max = 0;
	wnd->extent_min = MINUS_ONE_T;
	wnd->total_zeroes = 0;

	vbo = 0;

	for (iw = 0; iw < wnd->nwnd; iw++) {
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (wnd->inited) {
			if (!wnd->free_bits[iw]) {
				/* All ones. */
				if (prev_tail) {
					wnd_add_free_ext(wnd,
							 vbo * 8 - prev_tail,
							 prev_tail, true);
					prev_tail = 0;
				}
				goto next_wnd;
			}
			if (wbits == wnd->free_bits[iw]) {
				/* All zeroes. */
				prev_tail += wbits;
				wnd->total_zeroes += wbits;
				goto next_wnd;
			}
		}

		if (!len) {
			u32 off = vbo & sbi->cluster_mask;

			if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
					      &lcn, &clen, NULL)) {
				err = -ENOENT;
				goto out;
			}

			lbo = ((u64)lcn << cluster_bits) + off;
			len = ((u64)clen << cluster_bits) - off;
		}

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh) {
			err = -EIO;
			goto out;
		}

		buf = (ulong *)bh->b_data;

		used = __bitmap_weight(buf, wbits);
		if (used < wbits) {
			frb = wbits - used;
			wnd->free_bits[iw] = frb;
			wnd->total_zeroes += frb;
		}

		wpos = 0;
		wbit = vbo * 8;

		if (wbit + wbits > wnd->nbits)
			wbits = wnd->nbits - wbit;

		do {
			used = find_next_zero_bit(buf, wbits, wpos);

			if (used > wpos && prev_tail) {
				wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
						 prev_tail, true);
				prev_tail = 0;
			}

			wpos = used;

			if (wpos >= wbits) {
				/* No free blocks. */
				prev_tail = 0;
				break;
			}

			frb = find_next_bit(buf, wbits, wpos);
			if (frb >= wbits) {
				/* Keep the last free block. */
				prev_tail += frb - wpos;
				break;
			}

			wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
					 frb + prev_tail - wpos, true);

			/* Skip the free block and the first '1'. */
			wpos = frb + 1;
			/* Reset previous tail. */
			prev_tail = 0;
		} while (wpos < wbits);

next_wnd:

		if (bh)
			put_bh(bh);
		bh = NULL;

		vbo += blocksize;
		if (len) {
			len -= blocksize;
			lbo += blocksize;
		}
	}

	/* Add the last block. */
	if (prev_tail)
		wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

	/*
	 * Before the init cycle wnd->uptodated was 0.
	 * If any errors or limits occur during initialization then
	 * wnd->uptodated will be -1.
	 * If 'uptodated' is still 0 then the tree is really up to date.
	 */
	if (!wnd->uptodated)
		wnd->uptodated = 1;

	if (wnd->zone_bit != wnd->zone_end) {
		size_t zlen = wnd->zone_end - wnd->zone_bit;

		wnd->zone_end = wnd->zone_bit;
		wnd_zone_set(wnd, wnd->zone_bit, zlen);
	}

out:
	return err;
}

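/*
 * wnd_init - Initialize the bitmap descriptor and build the extents cache.
 *
 * A typical caller sequence (an illustrative sketch only, not copied from
 * an actual caller):
 *
 *	err = wnd_init(wnd, sb, nbits);
 *	if (!err) {
 *		size_t lcn;
 *		size_t got = wnd_find(wnd, to_alloc, hint,
 *				      BITMAP_FIND_MARK_AS_USED, &lcn);
 *		if (got)
 *			; // clusters [lcn, lcn + got) are now used
 *		wnd_close(wnd);
 *	}
 */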
int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
	int err;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;

	init_rwsem(&wnd->rw_lock);

	wnd->sb = sb;
	wnd->nbits = nbits;
	wnd->total_zeroes = nbits;
	wnd->extent_max = MINUS_ONE_T;
	wnd->zone_bit = wnd->zone_end = 0;
	wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
	wnd->bits_last = nbits & (wbits - 1);
	if (!wnd->bits_last)
		wnd->bits_last = wbits;

	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS);
	if (!wnd->free_bits)
		return -ENOMEM;

	err = wnd_rescan(wnd);
	if (err)
		return err;

	wnd->inited = true;

	return 0;
}

/*
 * wnd_map - Call sb_bread for the requested window.
 */
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
{
	size_t vbo;
	CLST lcn, clen;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi;
	struct buffer_head *bh;
	u64 lbo;

	sbi = sb->s_fs_info;
	vbo = (u64)iw << sb->s_blocksize_bits;

	if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
			      NULL)) {
		return ERR_PTR(-ENOENT);
	}

	lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);

	bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
	if (!bh)
		return ERR_PTR(-EIO);

	return bh;
}

/*
 * wnd_set_free - Mark the range of bits [bit, bit + bits) as free.
 */
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	u32 wbits = 8 * sb->s_blocksize;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;
		ulong *buf;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		buf = (ulong *)bh->b_data;

		lock_buffer(bh);

		__bitmap_clear(buf, wbit, op);

		wnd->free_bits[iw] += op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes += op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	wnd_add_free_ext(wnd, bit, bits0, false);

	return err;
}

/*
 * wnd_set_used - Mark the range of bits [bit, bit + bits) as used.
 */
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;
		ulong *buf;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}
		buf = (ulong *)bh->b_data;

		lock_buffer(bh);

		__bitmap_set(buf, wbit, op);
		wnd->free_bits[iw] -= op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes -= op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	if (!RB_EMPTY_ROOT(&wnd->start_tree))
		wnd_remove_free_ext(wnd, bit, bits0);

	return err;
}

/*
 * wnd_is_free_hlp
 *
 * Return: True if all clusters [bit, bit+bits) are free (bitmap only).
 */
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wbits != wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				return false;

			ret = are_bits_clear((ulong *)bh->b_data, wbit, op);

			put_bh(bh);
			if (!ret)
				return false;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}

	return true;
}

/*
 * wnd_is_free
 *
 * Return: True if all clusters [bit, bit+bits) are free.
 */
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret;
	struct rb_node *n;
	size_t end;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	/* Check the cached free extents first. */
	n = rb_lookup(&wnd->start_tree, bit);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);

	end = e->start.key + e->count.key;

	if (bit < end && bit + bits <= end)
		return true;

use_wnd:
	/* Fall back to the on-disk bitmap. */
	ret = wnd_is_free_hlp(wnd, bit, bits);

	return ret;
}

/*
 * wnd_is_used
 *
 * Return: True if all clusters [bit, bit+bits) are used.
 */
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret = false;
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	size_t end;
	struct rb_node *n;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	end = bit + bits;
	n = rb_lookup(&wnd->start_tree, end - 1);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);
	if (e->start.key + e->count.key > bit)
		return false;

use_wnd:
	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				goto out;

			ret = are_bits_set((ulong *)bh->b_data, wbit, op);
			put_bh(bh);
			if (!ret)
				goto out;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}
	ret = true;

out:
	return ret;
}

/*
 * wnd_find - Look for free space.
 *
 * - flags - BITMAP_FIND_XXX flags.
 *
 * Without BITMAP_FIND_FULL the function may return fewer bits than
 * requested; with it, exactly 'to_alloc' bits are found or none.
 *
 * Return: Number of allocated bits, 0 if not found.
 * '*allocated' receives the first allocated bit.
 */
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
		size_t flags, size_t *allocated)
{
	struct super_block *sb;
	u32 wbits, wpos, wzbit, wzend;
	size_t fnd, max_alloc, b_len, b_pos;
	size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
	size_t to_alloc0 = to_alloc;
	const ulong *buf;
	const struct e_node *e;
	const struct rb_node *pr, *cr;
	u8 log2_bits;
	bool fbits_valid;
	struct buffer_head *bh;

	/* Fast check for available free space. */
	if (flags & BITMAP_FIND_FULL) {
		size_t zeroes = wnd_zeroes(wnd);

		zeroes -= wnd->zone_end - wnd->zone_bit;
		if (zeroes < to_alloc0)
			goto no_space;

		if (to_alloc0 > wnd->extent_max)
			goto no_space;
	} else {
		if (to_alloc > wnd->extent_max)
			to_alloc = wnd->extent_max;
	}

	if (wnd->zone_bit <= hint && hint < wnd->zone_end)
		hint = wnd->zone_end;

	max_alloc = wnd->nbits;
	b_len = b_pos = 0;

	if (hint >= max_alloc)
		hint = 0;

	if (RB_EMPTY_ROOT(&wnd->start_tree)) {
		if (wnd->uptodated == 1) {
			/* Extents tree is updated -> no free space. */
			goto no_space;
		}
		goto scan_bitmap;
	}

	e = NULL;
	if (!hint)
		goto allocate_biggest;

	/* Use hint: Enumerate extents by start >= hint. */
	pr = NULL;
	cr = wnd->start_tree.rb_node;

	for (;;) {
		e = rb_entry(cr, struct e_node, start.node);

		if (e->start.key == hint)
			break;

		if (e->start.key < hint) {
			pr = cr;
			cr = cr->rb_right;
			if (!cr)
				break;
			continue;
		}

		cr = cr->rb_left;
		if (!cr) {
			e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
			break;
		}
	}

	if (!e)
		goto allocate_biggest;

	if (e->start.key + e->count.key > hint) {
		/* We have found an extent that contains 'hint'. */
		size_t len = e->start.key + e->count.key - hint;

		if (len >= to_alloc && hint + to_alloc <= max_alloc) {
			fnd = hint;
			goto found;
		}

		if (!(flags & BITMAP_FIND_FULL)) {
			if (len > to_alloc)
				len = to_alloc;

			if (hint + len <= max_alloc) {
				fnd = hint;
				to_alloc = len;
				goto found;
			}
		}
	}

allocate_biggest:
	/* Allocate from the biggest free extent. */
	e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
	if (e->count.key != wnd->extent_max)
		wnd->extent_max = e->count.key;

	if (e->count.key < max_alloc) {
		if (e->count.key >= to_alloc) {
			;
		} else if (flags & BITMAP_FIND_FULL) {
			if (e->count.key < to_alloc0) {
				/* The biggest free block is less than requested. */
				goto no_space;
			}
			to_alloc = e->count.key;
		} else if (-1 != wnd->uptodated) {
			to_alloc = e->count.key;
		} else {
			/* Check if we can use more bits. */
			size_t op, max_check;
			struct rb_root start_tree;

			memcpy(&start_tree, &wnd->start_tree,
			       sizeof(struct rb_root));
			memset(&wnd->start_tree, 0, sizeof(struct rb_root));

			max_check = e->start.key + to_alloc;
			if (max_check > max_alloc)
				max_check = max_alloc;
			for (op = e->start.key + e->count.key; op < max_check;
			     op++) {
				if (!wnd_is_free(wnd, op, 1))
					break;
			}
			memcpy(&wnd->start_tree, &start_tree,
			       sizeof(struct rb_root));
			to_alloc = op - e->start.key;
		}

		/* Prepare to return. */
		fnd = e->start.key;
		if (e->start.key + to_alloc > max_alloc)
			to_alloc = max_alloc - e->start.key;
		goto found;
	}

	if (wnd->uptodated == 1) {
		/* Extents tree is updated -> no free space. */
		goto no_space;
	}

	b_len = e->count.key;
	b_pos = e->start.key;

scan_bitmap:
	sb = wnd->sb;
	log2_bits = sb->s_blocksize_bits + 3;

	/* At most two ranges: [hint, max_alloc) + [0, hint). */
Again:

	/* TODO: Optimize request for case nbits > wbits. */
	iw = hint >> log2_bits;
	wbits = sb->s_blocksize * 8;
	wpos = hint & (wbits - 1);
	prev_tail = 0;
	fbits_valid = true;

	if (max_alloc == wnd->nbits) {
		nwnd = wnd->nwnd;
	} else {
		size_t t = max_alloc + wbits - 1;

		nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
	}

	/* Enumerate all windows. */
	for (; iw < nwnd; iw++) {
		wbit = iw << log2_bits;

		if (!wnd->free_bits[iw]) {
			if (prev_tail > b_len) {
				b_pos = wbit - prev_tail;
				b_len = prev_tail;
			}

			/* Skip a fully used window. */
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		if (unlikely(iw + 1 == nwnd)) {
			if (max_alloc == wnd->nbits) {
				wbits = wnd->bits_last;
			} else {
				size_t t = max_alloc & (wbits - 1);

				if (t) {
					wbits = t;
					fbits_valid = false;
				}
			}
		}

		if (wnd->zone_end > wnd->zone_bit) {
			ebit = wbit + wbits;
			zbit = max(wnd->zone_bit, wbit);
			zend = min(wnd->zone_end, ebit);

			/* Here we have a window [wbit, ebit) and zone [zbit, zend). */
			if (zend <= zbit) {
				/* Zone does not overlap window. */
			} else {
				wzbit = zbit - wbit;
				wzend = zend - wbit;

				/* Zone overlaps window. */
				if (wnd->free_bits[iw] == wzend - wzbit) {
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan two ranges of the window: [wbit, zbit) and [zend, ebit). */
				bh = wnd_map(wnd, iw);

				if (IS_ERR(bh)) {
					/* TODO: Error */
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				buf = (ulong *)bh->b_data;

				/* Scan range [wbit, zbit). */
				if (wpos < wzbit) {
					/* Scan range [wpos, zbit). */
					fnd = wnd_scan(buf, wbit, wpos, wzbit,
						       to_alloc, &prev_tail,
						       &b_pos, &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				prev_tail = 0;

				/* Scan range [zend, ebit). */
				if (wzend < wbits) {
					fnd = wnd_scan(buf, wbit,
						       max(wzend, wpos), wbits,
						       to_alloc, &prev_tail,
						       &b_pos, &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				wpos = 0;
				put_bh(bh);
				continue;
			}
		}

		/* Current window does not overlap the zone. */
		if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
			/* Window is empty. */
			if (prev_tail + wbits >= to_alloc) {
				fnd = wbit + wpos - prev_tail;
				goto found;
			}

			/* Increase 'prev_tail' and process the next window. */
			prev_tail += wbits;
			wpos = 0;
			continue;
		}

		/* Read window. */
		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			// TODO: Error.
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		buf = (ulong *)bh->b_data;

		/* Scan range [wpos, wbits). */
		fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
			       &b_pos, &b_len);
		put_bh(bh);
		if (fnd != MINUS_ONE_T)
			goto found;
	}

	if (b_len < prev_tail) {
		/* The last fragment. */
		b_len = prev_tail;
		b_pos = max_alloc - prev_tail;
	}

	if (hint) {
		/*
		 * We have scanned the range [hint, max_alloc).
		 * Prepare to scan the range [0, hint + to_alloc).
		 */
		size_t nextmax = hint + to_alloc;

		if (likely(nextmax >= hint) && nextmax < max_alloc)
			max_alloc = nextmax;
		hint = 0;
		goto Again;
	}

	if (!b_len)
		goto no_space;

	wnd->extent_max = b_len;

	if (flags & BITMAP_FIND_FULL)
		goto no_space;

	fnd = b_pos;
	to_alloc = b_len;

found:
	if (flags & BITMAP_FIND_MARK_AS_USED) {
		/* TODO: Optimize remove extent (pass 'e'?). */
		if (wnd_set_used(wnd, fnd, to_alloc))
			goto no_space;
	} else if (wnd->extent_max != MINUS_ONE_T &&
		   to_alloc > wnd->extent_max) {
		wnd->extent_max = to_alloc;
	}

	*allocated = fnd;
	return to_alloc;

no_space:
	return 0;
}

/*
 * wnd_extend - Extend the bitmap ($MFT bitmap).
 */
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
{
	int err;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;
	u32 b0, new_last;
	size_t bits, iw, new_wnd;
	size_t old_bits = wnd->nbits;
	u16 *new_free;

	if (new_bits <= old_bits)
		return -EINVAL;

	/* Align to 8 byte boundary. */
	new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
	new_last = new_bits & (wbits - 1);
	if (!new_last)
		new_last = wbits;

	if (new_wnd != wnd->nwnd) {
		new_free = kmalloc(new_wnd * sizeof(u16), GFP_NOFS);
		if (!new_free)
			return -ENOMEM;

		if (new_free != wnd->free_bits)
			memcpy(new_free, wnd->free_bits,
			       wnd->nwnd * sizeof(short));
		memset(new_free + wnd->nwnd, 0,
		       (new_wnd - wnd->nwnd) * sizeof(short));
		kfree(wnd->free_bits);
		wnd->free_bits = new_free;
	}

	/* Zero bits [old_bits, new_bits). */
	bits = new_bits - old_bits;
	b0 = old_bits & (wbits - 1);

	for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
		u32 op;
		size_t frb;
		u64 vbo, lbo, bytes;
		struct buffer_head *bh;
		ulong *buf;

		if (iw + 1 == new_wnd)
			wbits = new_last;

		op = b0 + bits > wbits ? wbits - b0 : bits;
		vbo = (u64)iw * blocksize;

		err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
		if (err)
			break;

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh)
			return -EIO;

		lock_buffer(bh);
		buf = (ulong *)bh->b_data;

		__bitmap_clear(buf, b0, blocksize * 8 - b0);
		frb = wbits - __bitmap_weight(buf, wbits);
		wnd->total_zeroes += frb - wnd->free_bits[iw];
		wnd->free_bits[iw] = frb;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		/* err = sync_dirty_buffer(bh); */

		b0 = 0;
		bits -= op;
	}

	wnd->nbits = new_bits;
	wnd->nwnd = new_wnd;
	wnd->bits_last = new_last;

	wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);

	return 0;
}

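/*
 * wnd_zone_set - Reserve the zone [lcn, lcn + len).
 *
 * The old zone is returned to the free extents cache and the new one
 * is removed from it, so wnd_find() skips the reserved bits.
 */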
void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
	size_t zlen;

	zlen = wnd->zone_end - wnd->zone_bit;
	if (zlen)
		wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);

	if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
		wnd_remove_free_ext(wnd, lcn, len);

	wnd->zone_bit = lcn;
	wnd->zone_end = lcn + len;
}

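/*
 * ntfs_trim_fs - Discard free clusters in the given range (FITRIM).
 */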
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	u32 wbits = 8 * sb->s_blocksize;
	CLST len = 0, lcn = 0, done = 0;
	CLST minlen = bytes_to_cluster(sbi, range->minlen);
	CLST lcn_from = bytes_to_cluster(sbi, range->start);
	size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
	u32 wbit = lcn_from & (wbits - 1);
	const ulong *buf;
	CLST lcn_to;

	if (!minlen)
		minlen = 1;

	if (range->len == (u64)-1)
		lcn_to = wnd->nbits;
	else
		lcn_to = bytes_to_cluster(sbi, range->start + range->len);

	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);

	/* 'iw' indexes windows, so the bound is nwnd, not nbits. */
	for (; iw < wnd->nwnd; iw++, wbit = 0) {
		CLST lcn_wnd = iw * wbits;
		struct buffer_head *bh;

		if (lcn_wnd > lcn_to)
			break;

		if (!wnd->free_bits[iw])
			continue;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (lcn_wnd + wbits > lcn_to)
			wbits = lcn_to - lcn_wnd;

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		buf = (ulong *)bh->b_data;

		for (; wbit < wbits; wbit++) {
			if (!test_bit(wbit, buf)) {
				if (!len)
					lcn = lcn_wnd + wbit;
				len += 1;
				continue;
			}
			if (len >= minlen) {
				err = ntfs_discard(sbi, lcn, len);
				if (err) {
					/* Release the window before bailing out. */
					put_bh(bh);
					goto out;
				}
				done += len;
			}
			len = 0;
		}
		put_bh(bh);
	}

	/* Process the last fragment. */
	if (len >= minlen) {
		err = ntfs_discard(sbi, lcn, len);
		if (err)
			goto out;
		done += len;
	}

out:
	range->len = (u64)done << sbi->cluster_bits;

	up_read(&wnd->rw_lock);

	return err;
}