Josef Bacik2e405ad2019-06-20 15:37:45 -04001// SPDX-License-Identifier: GPL-2.0
2
David Sterba784352f2019-08-21 18:54:28 +02003#include "misc.h"
Josef Bacik2e405ad2019-06-20 15:37:45 -04004#include "ctree.h"
5#include "block-group.h"
Josef Bacik3eeb3222019-06-20 15:37:47 -04006#include "space-info.h"
Josef Bacik9f212462019-08-06 16:43:19 +02007#include "disk-io.h"
8#include "free-space-cache.h"
9#include "free-space-tree.h"
11#include "volumes.h"
12#include "transaction.h"
13#include "ref-verify.h"
Josef Bacik4358d9632019-06-20 15:37:57 -040014#include "sysfs.h"
15#include "tree-log.h"
Josef Bacik77745c02019-06-20 15:38:00 -040016#include "delalloc-space.h"
Josef Bacik2e405ad2019-06-20 15:37:45 -040017
Josef Bacik878d7b62019-06-20 15:38:05 -040018/*
19 * Return target flags in extended format or 0 if restripe for this chunk_type
20 * is not in progress
21 *
22 * Should be called with balance_lock held
23 */
Josef Bacike11c0402019-06-20 15:38:07 -040024static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
Josef Bacik878d7b62019-06-20 15:38:05 -040025{
26 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
27 u64 target = 0;
28
29 if (!bctl)
30 return 0;
31
32 if (flags & BTRFS_BLOCK_GROUP_DATA &&
33 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
34 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
35 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
36 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
37 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
38 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
39 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
40 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
41 }
42
43 return target;
44}
45
46/*
47 * @flags: available profiles in extended format (see ctree.h)
48 *
49 * Return reduced profile in chunk format. If profile changing is in progress
50 * (either running or paused) picks the target profile (if it's already
51 * available), otherwise falls back to plain reducing.
52 */
53static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
54{
55 u64 num_devices = fs_info->fs_devices->rw_devices;
56 u64 target;
57 u64 raid_type;
58 u64 allowed = 0;
59
60 /*
 61	 * See if restripe for this chunk_type is in progress; if so, try to
62 * reduce to the target profile
63 */
64 spin_lock(&fs_info->balance_lock);
Josef Bacike11c0402019-06-20 15:38:07 -040065 target = get_restripe_target(fs_info, flags);
Josef Bacik878d7b62019-06-20 15:38:05 -040066 if (target) {
67 /* Pick target profile only if it's already available */
68 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
69 spin_unlock(&fs_info->balance_lock);
70 return extended_to_chunk(target);
71 }
72 }
73 spin_unlock(&fs_info->balance_lock);
74
75 /* First, mask out the RAID levels which aren't possible */
76 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
77 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
78 allowed |= btrfs_raid_array[raid_type].bg_flag;
79 }
80 allowed &= flags;
81
82 if (allowed & BTRFS_BLOCK_GROUP_RAID6)
83 allowed = BTRFS_BLOCK_GROUP_RAID6;
84 else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
85 allowed = BTRFS_BLOCK_GROUP_RAID5;
86 else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
87 allowed = BTRFS_BLOCK_GROUP_RAID10;
88 else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
89 allowed = BTRFS_BLOCK_GROUP_RAID1;
90 else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
91 allowed = BTRFS_BLOCK_GROUP_RAID0;
92
93 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
94
95 return extended_to_chunk(flags | allowed);
96}
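/*
 * Illustrative example (not taken from the original source): with 4 rw
 * devices and DATA|RAID10|RAID1|RAID0 passed in, each of those RAID levels
 * is possible, so the ladder above picks RAID10 and the result is
 * DATA|RAID10 in chunk format.
 */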
97
98static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
99{
100 unsigned seq;
101 u64 flags;
102
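	/*
	 * profiles_lock is a seqlock; retry the read until we see a
	 * consistent snapshot of the avail_*_alloc_bits.
	 */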
103 do {
104 flags = orig_flags;
105 seq = read_seqbegin(&fs_info->profiles_lock);
106
107 if (flags & BTRFS_BLOCK_GROUP_DATA)
108 flags |= fs_info->avail_data_alloc_bits;
109 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
110 flags |= fs_info->avail_system_alloc_bits;
111 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
112 flags |= fs_info->avail_metadata_alloc_bits;
113 } while (read_seqretry(&fs_info->profiles_lock, seq));
114
115 return btrfs_reduce_alloc_profile(fs_info, flags);
116}
117
118u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
119{
120 return get_alloc_profile(fs_info, orig_flags);
121}
122
Josef Bacik3cad1282019-06-20 15:37:46 -0400123void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
124{
125 atomic_inc(&cache->count);
126}
127
128void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
129{
130 if (atomic_dec_and_test(&cache->count)) {
131 WARN_ON(cache->pinned > 0);
132 WARN_ON(cache->reserved > 0);
133
134 /*
 135		 * If not empty, someone is still holding the mutex of
 136		 * full_stripe_lock, which can only be released by that
 137		 * holder, so freeing the block group now would cause a
 138		 * use-after-free when the holder releases the full stripe lock.
 139		 *
 140		 * There is no better way to resolve this, so just warn.
141 */
142 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
143 kfree(cache->free_space_ctl);
144 kfree(cache);
145 }
146}
147
Josef Bacik2e405ad2019-06-20 15:37:45 -0400148/*
Josef Bacik4358d9632019-06-20 15:37:57 -0400149 * This adds the block group to the fs_info rb tree for the block group cache
150 */
151static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
152 struct btrfs_block_group_cache *block_group)
153{
154 struct rb_node **p;
155 struct rb_node *parent = NULL;
156 struct btrfs_block_group_cache *cache;
157
158 spin_lock(&info->block_group_cache_lock);
159 p = &info->block_group_cache_tree.rb_node;
160
161 while (*p) {
162 parent = *p;
163 cache = rb_entry(parent, struct btrfs_block_group_cache,
164 cache_node);
David Sterbab3470b52019-10-23 18:48:22 +0200165 if (block_group->start < cache->start) {
Josef Bacik4358d9632019-06-20 15:37:57 -0400166 p = &(*p)->rb_left;
David Sterbab3470b52019-10-23 18:48:22 +0200167 } else if (block_group->start > cache->start) {
Josef Bacik4358d9632019-06-20 15:37:57 -0400168 p = &(*p)->rb_right;
169 } else {
170 spin_unlock(&info->block_group_cache_lock);
171 return -EEXIST;
172 }
173 }
174
175 rb_link_node(&block_group->cache_node, parent, p);
176 rb_insert_color(&block_group->cache_node,
177 &info->block_group_cache_tree);
178
David Sterbab3470b52019-10-23 18:48:22 +0200179 if (info->first_logical_byte > block_group->start)
180 info->first_logical_byte = block_group->start;
Josef Bacik4358d9632019-06-20 15:37:57 -0400181
182 spin_unlock(&info->block_group_cache_lock);
183
184 return 0;
185}
186
187/*
Josef Bacik2e405ad2019-06-20 15:37:45 -0400188 * This will return the block group at or after bytenr if contains is 0, else
189 * it will return the block group that contains the bytenr
190 */
191static struct btrfs_block_group_cache *block_group_cache_tree_search(
192 struct btrfs_fs_info *info, u64 bytenr, int contains)
193{
194 struct btrfs_block_group_cache *cache, *ret = NULL;
195 struct rb_node *n;
196 u64 end, start;
197
198 spin_lock(&info->block_group_cache_lock);
199 n = info->block_group_cache_tree.rb_node;
200
201 while (n) {
202 cache = rb_entry(n, struct btrfs_block_group_cache,
203 cache_node);
David Sterbab3470b52019-10-23 18:48:22 +0200204 end = cache->start + cache->length - 1;
205 start = cache->start;
Josef Bacik2e405ad2019-06-20 15:37:45 -0400206
207 if (bytenr < start) {
David Sterbab3470b52019-10-23 18:48:22 +0200208 if (!contains && (!ret || start < ret->start))
Josef Bacik2e405ad2019-06-20 15:37:45 -0400209 ret = cache;
210 n = n->rb_left;
211 } else if (bytenr > start) {
212 if (contains && bytenr <= end) {
213 ret = cache;
214 break;
215 }
216 n = n->rb_right;
217 } else {
218 ret = cache;
219 break;
220 }
221 }
222 if (ret) {
223 btrfs_get_block_group(ret);
David Sterbab3470b52019-10-23 18:48:22 +0200224 if (bytenr == 0 && info->first_logical_byte > ret->start)
225 info->first_logical_byte = ret->start;
Josef Bacik2e405ad2019-06-20 15:37:45 -0400226 }
227 spin_unlock(&info->block_group_cache_lock);
228
229 return ret;
230}
231
232/*
233 * Return the block group that starts at or after bytenr
234 */
235struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
236 struct btrfs_fs_info *info, u64 bytenr)
237{
238 return block_group_cache_tree_search(info, bytenr, 0);
239}
240
241/*
242 * Return the block group that contains the given bytenr
243 */
244struct btrfs_block_group_cache *btrfs_lookup_block_group(
245 struct btrfs_fs_info *info, u64 bytenr)
246{
247 return block_group_cache_tree_search(info, bytenr, 1);
248}
249
250struct btrfs_block_group_cache *btrfs_next_block_group(
251 struct btrfs_block_group_cache *cache)
252{
253 struct btrfs_fs_info *fs_info = cache->fs_info;
254 struct rb_node *node;
255
256 spin_lock(&fs_info->block_group_cache_lock);
257
258 /* If our block group was removed, we need a full search. */
259 if (RB_EMPTY_NODE(&cache->cache_node)) {
David Sterbab3470b52019-10-23 18:48:22 +0200260 const u64 next_bytenr = cache->start + cache->length;
Josef Bacik2e405ad2019-06-20 15:37:45 -0400261
262 spin_unlock(&fs_info->block_group_cache_lock);
263 btrfs_put_block_group(cache);
 264		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
265 }
266 node = rb_next(&cache->cache_node);
267 btrfs_put_block_group(cache);
268 if (node) {
269 cache = rb_entry(node, struct btrfs_block_group_cache,
270 cache_node);
271 btrfs_get_block_group(cache);
272 } else
273 cache = NULL;
274 spin_unlock(&fs_info->block_group_cache_lock);
275 return cache;
276}
Josef Bacik3eeb3222019-06-20 15:37:47 -0400277
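/*
 * Take a nocow writers reference on the block group containing @bytenr.
 * Returns false if the block group is read-only or cannot be found; on
 * success the reference is dropped by btrfs_dec_nocow_writers().
 */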
278bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
279{
280 struct btrfs_block_group_cache *bg;
281 bool ret = true;
282
283 bg = btrfs_lookup_block_group(fs_info, bytenr);
284 if (!bg)
285 return false;
286
287 spin_lock(&bg->lock);
288 if (bg->ro)
289 ret = false;
290 else
291 atomic_inc(&bg->nocow_writers);
292 spin_unlock(&bg->lock);
293
294 /* No put on block group, done by btrfs_dec_nocow_writers */
295 if (!ret)
296 btrfs_put_block_group(bg);
297
298 return ret;
299}
300
301void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
302{
303 struct btrfs_block_group_cache *bg;
304
305 bg = btrfs_lookup_block_group(fs_info, bytenr);
306 ASSERT(bg);
307 if (atomic_dec_and_test(&bg->nocow_writers))
308 wake_up_var(&bg->nocow_writers);
309 /*
310 * Once for our lookup and once for the lookup done by a previous call
311 * to btrfs_inc_nocow_writers()
312 */
313 btrfs_put_block_group(bg);
314 btrfs_put_block_group(bg);
315}
316
317void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
318{
319 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
320}
321
322void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
323 const u64 start)
324{
325 struct btrfs_block_group_cache *bg;
326
327 bg = btrfs_lookup_block_group(fs_info, start);
328 ASSERT(bg);
329 if (atomic_dec_and_test(&bg->reservations))
330 wake_up_var(&bg->reservations);
331 btrfs_put_block_group(bg);
332}
333
334void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
335{
336 struct btrfs_space_info *space_info = bg->space_info;
337
338 ASSERT(bg->ro);
339
340 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
341 return;
342
343 /*
344 * Our block group is read only but before we set it to read only,
 345	 * some task might have allocated an extent from it already, but it
 346	 * has not yet created the respective ordered extent (and added it to a
347 * root's list of ordered extents).
348 * Therefore wait for any task currently allocating extents, since the
349 * block group's reservations counter is incremented while a read lock
350 * on the groups' semaphore is held and decremented after releasing
351 * the read access on that semaphore and creating the ordered extent.
352 */
353 down_write(&space_info->groups_sem);
354 up_write(&space_info->groups_sem);
355
356 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
357}
Josef Bacik9f212462019-08-06 16:43:19 +0200358
359struct btrfs_caching_control *btrfs_get_caching_control(
360 struct btrfs_block_group_cache *cache)
361{
362 struct btrfs_caching_control *ctl;
363
364 spin_lock(&cache->lock);
365 if (!cache->caching_ctl) {
366 spin_unlock(&cache->lock);
367 return NULL;
368 }
369
370 ctl = cache->caching_ctl;
371 refcount_inc(&ctl->count);
372 spin_unlock(&cache->lock);
373 return ctl;
374}
375
376void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
377{
378 if (refcount_dec_and_test(&ctl->count))
379 kfree(ctl);
380}
381
382/*
 383 * When we wait for progress in the block group caching, it's because our
384 * allocation attempt failed at least once. So, we must sleep and let some
385 * progress happen before we try again.
386 *
387 * This function will sleep at least once waiting for new free space to show
388 * up, and then it will check the block group free space numbers for our min
389 * num_bytes. Another option is to have it go ahead and look in the rbtree for
390 * a free extent of a given size, but this is a good start.
391 *
392 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
393 * any of the information in this block group.
394 */
395void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
396 u64 num_bytes)
397{
398 struct btrfs_caching_control *caching_ctl;
399
400 caching_ctl = btrfs_get_caching_control(cache);
401 if (!caching_ctl)
402 return;
403
404 wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) ||
405 (cache->free_space_ctl->free_space >= num_bytes));
406
407 btrfs_put_caching_control(caching_ctl);
408}
409
410int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
411{
412 struct btrfs_caching_control *caching_ctl;
413 int ret = 0;
414
415 caching_ctl = btrfs_get_caching_control(cache);
416 if (!caching_ctl)
417 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
418
419 wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
420 if (cache->cached == BTRFS_CACHE_ERROR)
421 ret = -EIO;
422 btrfs_put_caching_control(caching_ctl);
423 return ret;
424}
425
426#ifdef CONFIG_BTRFS_DEBUG
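/*
 * Debug helper: remove every other chunk-sized range from the free space of
 * @block_group so the allocator's handling of fragmented groups is exercised.
 */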
Josef Bacike11c0402019-06-20 15:38:07 -0400427static void fragment_free_space(struct btrfs_block_group_cache *block_group)
Josef Bacik9f212462019-08-06 16:43:19 +0200428{
429 struct btrfs_fs_info *fs_info = block_group->fs_info;
David Sterbab3470b52019-10-23 18:48:22 +0200430 u64 start = block_group->start;
431 u64 len = block_group->length;
Josef Bacik9f212462019-08-06 16:43:19 +0200432 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
433 fs_info->nodesize : fs_info->sectorsize;
434 u64 step = chunk << 1;
435
436 while (len > chunk) {
437 btrfs_remove_free_space(block_group, start, chunk);
438 start += step;
439 if (len < step)
440 len = 0;
441 else
442 len -= step;
443 }
444}
445#endif
446
447/*
 448 * This is only called by btrfs_cache_block_group. Since we could have freed
 449 * extents, we need to check the pinned_extents for any extents that can't be
450 * used yet since their free space will be released as soon as the transaction
451 * commits.
452 */
453u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
454 u64 start, u64 end)
455{
456 struct btrfs_fs_info *info = block_group->fs_info;
457 u64 extent_start, extent_end, size, total_added = 0;
458 int ret;
459
460 while (start < end) {
461 ret = find_first_extent_bit(info->pinned_extents, start,
462 &extent_start, &extent_end,
463 EXTENT_DIRTY | EXTENT_UPTODATE,
464 NULL);
465 if (ret)
466 break;
467
468 if (extent_start <= start) {
469 start = extent_end + 1;
470 } else if (extent_start > start && extent_start < end) {
471 size = extent_start - start;
472 total_added += size;
473 ret = btrfs_add_free_space(block_group, start,
474 size);
475 BUG_ON(ret); /* -ENOMEM or logic error */
476 start = extent_end + 1;
477 } else {
478 break;
479 }
480 }
481
482 if (start < end) {
483 size = end - start;
484 total_added += size;
485 ret = btrfs_add_free_space(block_group, start, size);
486 BUG_ON(ret); /* -ENOMEM or logic error */
487 }
488
489 return total_added;
490}
491
492static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
493{
494 struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
495 struct btrfs_fs_info *fs_info = block_group->fs_info;
496 struct btrfs_root *extent_root = fs_info->extent_root;
497 struct btrfs_path *path;
498 struct extent_buffer *leaf;
499 struct btrfs_key key;
500 u64 total_found = 0;
501 u64 last = 0;
502 u32 nritems;
503 int ret;
504 bool wakeup = true;
505
506 path = btrfs_alloc_path();
507 if (!path)
508 return -ENOMEM;
509
David Sterbab3470b52019-10-23 18:48:22 +0200510 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
Josef Bacik9f212462019-08-06 16:43:19 +0200511
512#ifdef CONFIG_BTRFS_DEBUG
513 /*
514 * If we're fragmenting we don't want to make anybody think we can
515 * allocate from this block group until we've had a chance to fragment
516 * the free space.
517 */
518 if (btrfs_should_fragment_free_space(block_group))
519 wakeup = false;
520#endif
521 /*
522 * We don't want to deadlock with somebody trying to allocate a new
523 * extent for the extent root while also trying to search the extent
524 * root to add free space. So we skip locking and search the commit
525 * root, since its read-only
526 */
527 path->skip_locking = 1;
528 path->search_commit_root = 1;
529 path->reada = READA_FORWARD;
530
531 key.objectid = last;
532 key.offset = 0;
533 key.type = BTRFS_EXTENT_ITEM_KEY;
534
535next:
536 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
537 if (ret < 0)
538 goto out;
539
540 leaf = path->nodes[0];
541 nritems = btrfs_header_nritems(leaf);
542
543 while (1) {
544 if (btrfs_fs_closing(fs_info) > 1) {
545 last = (u64)-1;
546 break;
547 }
548
549 if (path->slots[0] < nritems) {
550 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
551 } else {
552 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
553 if (ret)
554 break;
555
556 if (need_resched() ||
557 rwsem_is_contended(&fs_info->commit_root_sem)) {
558 if (wakeup)
559 caching_ctl->progress = last;
560 btrfs_release_path(path);
561 up_read(&fs_info->commit_root_sem);
562 mutex_unlock(&caching_ctl->mutex);
563 cond_resched();
564 mutex_lock(&caching_ctl->mutex);
565 down_read(&fs_info->commit_root_sem);
566 goto next;
567 }
568
569 ret = btrfs_next_leaf(extent_root, path);
570 if (ret < 0)
571 goto out;
572 if (ret)
573 break;
574 leaf = path->nodes[0];
575 nritems = btrfs_header_nritems(leaf);
576 continue;
577 }
578
579 if (key.objectid < last) {
580 key.objectid = last;
581 key.offset = 0;
582 key.type = BTRFS_EXTENT_ITEM_KEY;
583
584 if (wakeup)
585 caching_ctl->progress = last;
586 btrfs_release_path(path);
587 goto next;
588 }
589
David Sterbab3470b52019-10-23 18:48:22 +0200590 if (key.objectid < block_group->start) {
Josef Bacik9f212462019-08-06 16:43:19 +0200591 path->slots[0]++;
592 continue;
593 }
594
David Sterbab3470b52019-10-23 18:48:22 +0200595 if (key.objectid >= block_group->start + block_group->length)
Josef Bacik9f212462019-08-06 16:43:19 +0200596 break;
597
598 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
599 key.type == BTRFS_METADATA_ITEM_KEY) {
600 total_found += add_new_free_space(block_group, last,
601 key.objectid);
602 if (key.type == BTRFS_METADATA_ITEM_KEY)
603 last = key.objectid +
604 fs_info->nodesize;
605 else
606 last = key.objectid + key.offset;
607
608 if (total_found > CACHING_CTL_WAKE_UP) {
609 total_found = 0;
610 if (wakeup)
611 wake_up(&caching_ctl->wait);
612 }
613 }
614 path->slots[0]++;
615 }
616 ret = 0;
617
618 total_found += add_new_free_space(block_group, last,
David Sterbab3470b52019-10-23 18:48:22 +0200619 block_group->start + block_group->length);
Josef Bacik9f212462019-08-06 16:43:19 +0200620 caching_ctl->progress = (u64)-1;
621
622out:
623 btrfs_free_path(path);
624 return ret;
625}
626
627static noinline void caching_thread(struct btrfs_work *work)
628{
629 struct btrfs_block_group_cache *block_group;
630 struct btrfs_fs_info *fs_info;
631 struct btrfs_caching_control *caching_ctl;
632 int ret;
633
634 caching_ctl = container_of(work, struct btrfs_caching_control, work);
635 block_group = caching_ctl->block_group;
636 fs_info = block_group->fs_info;
637
638 mutex_lock(&caching_ctl->mutex);
639 down_read(&fs_info->commit_root_sem);
640
641 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
642 ret = load_free_space_tree(caching_ctl);
643 else
644 ret = load_extent_tree_free(caching_ctl);
645
646 spin_lock(&block_group->lock);
647 block_group->caching_ctl = NULL;
648 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
649 spin_unlock(&block_group->lock);
650
651#ifdef CONFIG_BTRFS_DEBUG
652 if (btrfs_should_fragment_free_space(block_group)) {
653 u64 bytes_used;
654
655 spin_lock(&block_group->space_info->lock);
656 spin_lock(&block_group->lock);
David Sterbab3470b52019-10-23 18:48:22 +0200657 bytes_used = block_group->length - block_group->used;
Josef Bacik9f212462019-08-06 16:43:19 +0200658 block_group->space_info->bytes_used += bytes_used >> 1;
659 spin_unlock(&block_group->lock);
660 spin_unlock(&block_group->space_info->lock);
Josef Bacike11c0402019-06-20 15:38:07 -0400661 fragment_free_space(block_group);
Josef Bacik9f212462019-08-06 16:43:19 +0200662 }
663#endif
664
665 caching_ctl->progress = (u64)-1;
666
667 up_read(&fs_info->commit_root_sem);
668 btrfs_free_excluded_extents(block_group);
669 mutex_unlock(&caching_ctl->mutex);
670
671 wake_up(&caching_ctl->wait);
672
673 btrfs_put_caching_control(caching_ctl);
674 btrfs_put_block_group(block_group);
675}
676
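/*
 * Start loading the free space information for @cache.  With
 * @load_cache_only set, only the fast on-disk free space cache load is
 * attempted; otherwise a caching worker is queued to scan the extent tree
 * or free space tree whenever the fast load does not finish the job.
 */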
677int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
678 int load_cache_only)
679{
680 DEFINE_WAIT(wait);
681 struct btrfs_fs_info *fs_info = cache->fs_info;
682 struct btrfs_caching_control *caching_ctl;
683 int ret = 0;
684
685 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
686 if (!caching_ctl)
687 return -ENOMEM;
688
689 INIT_LIST_HEAD(&caching_ctl->list);
690 mutex_init(&caching_ctl->mutex);
691 init_waitqueue_head(&caching_ctl->wait);
692 caching_ctl->block_group = cache;
David Sterbab3470b52019-10-23 18:48:22 +0200693 caching_ctl->progress = cache->start;
Josef Bacik9f212462019-08-06 16:43:19 +0200694 refcount_set(&caching_ctl->count, 1);
Omar Sandovala0cac0e2019-09-16 11:30:57 -0700695 btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
Josef Bacik9f212462019-08-06 16:43:19 +0200696
697 spin_lock(&cache->lock);
698 /*
 699	 * This should be a rare occasion, but it can happen in the
700 * case where one thread starts to load the space cache info, and then
701 * some other thread starts a transaction commit which tries to do an
702 * allocation while the other thread is still loading the space cache
703 * info. The previous loop should have kept us from choosing this block
704 * group, but if we've moved to the state where we will wait on caching
 705	 * block groups, we need to first check if we're doing a fast load here,
706 * so we can wait for it to finish, otherwise we could end up allocating
 707	 * from a block group whose cache gets evicted for one reason or
708 * another.
709 */
710 while (cache->cached == BTRFS_CACHE_FAST) {
711 struct btrfs_caching_control *ctl;
712
713 ctl = cache->caching_ctl;
714 refcount_inc(&ctl->count);
715 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
716 spin_unlock(&cache->lock);
717
718 schedule();
719
720 finish_wait(&ctl->wait, &wait);
721 btrfs_put_caching_control(ctl);
722 spin_lock(&cache->lock);
723 }
724
725 if (cache->cached != BTRFS_CACHE_NO) {
726 spin_unlock(&cache->lock);
727 kfree(caching_ctl);
728 return 0;
729 }
730 WARN_ON(cache->caching_ctl);
731 cache->caching_ctl = caching_ctl;
732 cache->cached = BTRFS_CACHE_FAST;
733 spin_unlock(&cache->lock);
734
735 if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
736 mutex_lock(&caching_ctl->mutex);
737 ret = load_free_space_cache(cache);
738
739 spin_lock(&cache->lock);
740 if (ret == 1) {
741 cache->caching_ctl = NULL;
742 cache->cached = BTRFS_CACHE_FINISHED;
743 cache->last_byte_to_unpin = (u64)-1;
744 caching_ctl->progress = (u64)-1;
745 } else {
746 if (load_cache_only) {
747 cache->caching_ctl = NULL;
748 cache->cached = BTRFS_CACHE_NO;
749 } else {
750 cache->cached = BTRFS_CACHE_STARTED;
751 cache->has_caching_ctl = 1;
752 }
753 }
754 spin_unlock(&cache->lock);
755#ifdef CONFIG_BTRFS_DEBUG
756 if (ret == 1 &&
757 btrfs_should_fragment_free_space(cache)) {
758 u64 bytes_used;
759
760 spin_lock(&cache->space_info->lock);
761 spin_lock(&cache->lock);
David Sterbab3470b52019-10-23 18:48:22 +0200762 bytes_used = cache->length - cache->used;
Josef Bacik9f212462019-08-06 16:43:19 +0200763 cache->space_info->bytes_used += bytes_used >> 1;
764 spin_unlock(&cache->lock);
765 spin_unlock(&cache->space_info->lock);
Josef Bacike11c0402019-06-20 15:38:07 -0400766 fragment_free_space(cache);
Josef Bacik9f212462019-08-06 16:43:19 +0200767 }
768#endif
769 mutex_unlock(&caching_ctl->mutex);
770
771 wake_up(&caching_ctl->wait);
772 if (ret == 1) {
773 btrfs_put_caching_control(caching_ctl);
774 btrfs_free_excluded_extents(cache);
775 return 0;
776 }
777 } else {
778 /*
779 * We're either using the free space tree or no caching at all.
 780		 * Set cached to the appropriate value and wake up any waiters.
781 */
782 spin_lock(&cache->lock);
783 if (load_cache_only) {
784 cache->caching_ctl = NULL;
785 cache->cached = BTRFS_CACHE_NO;
786 } else {
787 cache->cached = BTRFS_CACHE_STARTED;
788 cache->has_caching_ctl = 1;
789 }
790 spin_unlock(&cache->lock);
791 wake_up(&caching_ctl->wait);
792 }
793
794 if (load_cache_only) {
795 btrfs_put_caching_control(caching_ctl);
796 return 0;
797 }
798
799 down_write(&fs_info->commit_root_sem);
800 refcount_inc(&caching_ctl->count);
801 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
802 up_write(&fs_info->commit_root_sem);
803
804 btrfs_get_block_group(cache);
805
806 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
807
808 return ret;
809}
Josef Bacike3e05202019-06-20 15:37:55 -0400810
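/* Drop the extended profile bits of @flags from the per-type avail_*_alloc_bits. */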
811static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
812{
813 u64 extra_flags = chunk_to_extended(flags) &
814 BTRFS_EXTENDED_PROFILE_MASK;
815
816 write_seqlock(&fs_info->profiles_lock);
817 if (flags & BTRFS_BLOCK_GROUP_DATA)
818 fs_info->avail_data_alloc_bits &= ~extra_flags;
819 if (flags & BTRFS_BLOCK_GROUP_METADATA)
820 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
821 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
822 fs_info->avail_system_alloc_bits &= ~extra_flags;
823 write_sequnlock(&fs_info->profiles_lock);
824}
825
826/*
827 * Clear incompat bits for the following feature(s):
828 *
829 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
830 * in the whole filesystem
David Sterba9c907442019-10-31 15:52:01 +0100831 *
832 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
Josef Bacike3e05202019-06-20 15:37:55 -0400833 */
834static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
835{
David Sterba9c907442019-10-31 15:52:01 +0100836 bool found_raid56 = false;
837 bool found_raid1c34 = false;
838
839 if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
840 (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
841 (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
Josef Bacike3e05202019-06-20 15:37:55 -0400842 struct list_head *head = &fs_info->space_info;
843 struct btrfs_space_info *sinfo;
844
845 list_for_each_entry_rcu(sinfo, head, list) {
Josef Bacike3e05202019-06-20 15:37:55 -0400846 down_read(&sinfo->groups_sem);
847 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
David Sterba9c907442019-10-31 15:52:01 +0100848 found_raid56 = true;
Josef Bacike3e05202019-06-20 15:37:55 -0400849 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
David Sterba9c907442019-10-31 15:52:01 +0100850 found_raid56 = true;
851 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
852 found_raid1c34 = true;
853 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
854 found_raid1c34 = true;
Josef Bacike3e05202019-06-20 15:37:55 -0400855 up_read(&sinfo->groups_sem);
Josef Bacike3e05202019-06-20 15:37:55 -0400856 }
David Sterba9c907442019-10-31 15:52:01 +0100857 if (found_raid56)
858 btrfs_clear_fs_incompat(fs_info, RAID56);
859 if (found_raid1c34)
860 btrfs_clear_fs_incompat(fs_info, RAID1C34);
Josef Bacike3e05202019-06-20 15:37:55 -0400861 }
862}
863
864int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
865 u64 group_start, struct extent_map *em)
866{
867 struct btrfs_fs_info *fs_info = trans->fs_info;
868 struct btrfs_root *root = fs_info->extent_root;
869 struct btrfs_path *path;
870 struct btrfs_block_group_cache *block_group;
871 struct btrfs_free_cluster *cluster;
872 struct btrfs_root *tree_root = fs_info->tree_root;
873 struct btrfs_key key;
874 struct inode *inode;
875 struct kobject *kobj = NULL;
876 int ret;
877 int index;
878 int factor;
879 struct btrfs_caching_control *caching_ctl = NULL;
880 bool remove_em;
881 bool remove_rsv = false;
882
883 block_group = btrfs_lookup_block_group(fs_info, group_start);
884 BUG_ON(!block_group);
885 BUG_ON(!block_group->ro);
886
887 trace_btrfs_remove_block_group(block_group);
888 /*
889 * Free the reserved super bytes from this block group before
 890	 * removing it.
891 */
892 btrfs_free_excluded_extents(block_group);
David Sterbab3470b52019-10-23 18:48:22 +0200893 btrfs_free_ref_tree_range(fs_info, block_group->start,
894 block_group->length);
Josef Bacike3e05202019-06-20 15:37:55 -0400895
Josef Bacike3e05202019-06-20 15:37:55 -0400896 index = btrfs_bg_flags_to_raid_index(block_group->flags);
897 factor = btrfs_bg_type_to_factor(block_group->flags);
898
899 /* make sure this block group isn't part of an allocation cluster */
900 cluster = &fs_info->data_alloc_cluster;
901 spin_lock(&cluster->refill_lock);
902 btrfs_return_cluster_to_free_space(block_group, cluster);
903 spin_unlock(&cluster->refill_lock);
904
905 /*
906 * make sure this block group isn't part of a metadata
907 * allocation cluster
908 */
909 cluster = &fs_info->meta_alloc_cluster;
910 spin_lock(&cluster->refill_lock);
911 btrfs_return_cluster_to_free_space(block_group, cluster);
912 spin_unlock(&cluster->refill_lock);
913
914 path = btrfs_alloc_path();
915 if (!path) {
916 ret = -ENOMEM;
917 goto out;
918 }
919
920 /*
921 * get the inode first so any iput calls done for the io_list
922 * aren't the final iput (no unlinks allowed now)
923 */
924 inode = lookup_free_space_inode(block_group, path);
925
926 mutex_lock(&trans->transaction->cache_write_mutex);
927 /*
928 * Make sure our free space cache IO is done before removing the
929 * free space inode
930 */
931 spin_lock(&trans->transaction->dirty_bgs_lock);
932 if (!list_empty(&block_group->io_list)) {
933 list_del_init(&block_group->io_list);
934
935 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
936
937 spin_unlock(&trans->transaction->dirty_bgs_lock);
938 btrfs_wait_cache_io(trans, block_group, path);
939 btrfs_put_block_group(block_group);
940 spin_lock(&trans->transaction->dirty_bgs_lock);
941 }
942
943 if (!list_empty(&block_group->dirty_list)) {
944 list_del_init(&block_group->dirty_list);
945 remove_rsv = true;
946 btrfs_put_block_group(block_group);
947 }
948 spin_unlock(&trans->transaction->dirty_bgs_lock);
949 mutex_unlock(&trans->transaction->cache_write_mutex);
950
951 if (!IS_ERR(inode)) {
952 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
953 if (ret) {
954 btrfs_add_delayed_iput(inode);
955 goto out;
956 }
957 clear_nlink(inode);
 958		/* One for the block group's ref */
959 spin_lock(&block_group->lock);
960 if (block_group->iref) {
961 block_group->iref = 0;
962 block_group->inode = NULL;
963 spin_unlock(&block_group->lock);
964 iput(inode);
965 } else {
966 spin_unlock(&block_group->lock);
967 }
968 /* One for our lookup ref */
969 btrfs_add_delayed_iput(inode);
970 }
971
972 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
Josef Bacike3e05202019-06-20 15:37:55 -0400973 key.type = 0;
David Sterbab3470b52019-10-23 18:48:22 +0200974 key.offset = block_group->start;
Josef Bacike3e05202019-06-20 15:37:55 -0400975
976 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
977 if (ret < 0)
978 goto out;
979 if (ret > 0)
980 btrfs_release_path(path);
981 if (ret == 0) {
982 ret = btrfs_del_item(trans, tree_root, path);
983 if (ret)
984 goto out;
985 btrfs_release_path(path);
986 }
987
988 spin_lock(&fs_info->block_group_cache_lock);
989 rb_erase(&block_group->cache_node,
990 &fs_info->block_group_cache_tree);
991 RB_CLEAR_NODE(&block_group->cache_node);
992
David Sterbab3470b52019-10-23 18:48:22 +0200993 if (fs_info->first_logical_byte == block_group->start)
Josef Bacike3e05202019-06-20 15:37:55 -0400994 fs_info->first_logical_byte = (u64)-1;
995 spin_unlock(&fs_info->block_group_cache_lock);
996
997 down_write(&block_group->space_info->groups_sem);
998 /*
999 * we must use list_del_init so people can check to see if they
1000 * are still on the list after taking the semaphore
1001 */
1002 list_del_init(&block_group->list);
1003 if (list_empty(&block_group->space_info->block_groups[index])) {
1004 kobj = block_group->space_info->block_group_kobjs[index];
1005 block_group->space_info->block_group_kobjs[index] = NULL;
1006 clear_avail_alloc_bits(fs_info, block_group->flags);
1007 }
1008 up_write(&block_group->space_info->groups_sem);
1009 clear_incompat_bg_bits(fs_info, block_group->flags);
1010 if (kobj) {
1011 kobject_del(kobj);
1012 kobject_put(kobj);
1013 }
1014
1015 if (block_group->has_caching_ctl)
1016 caching_ctl = btrfs_get_caching_control(block_group);
1017 if (block_group->cached == BTRFS_CACHE_STARTED)
1018 btrfs_wait_block_group_cache_done(block_group);
1019 if (block_group->has_caching_ctl) {
1020 down_write(&fs_info->commit_root_sem);
1021 if (!caching_ctl) {
1022 struct btrfs_caching_control *ctl;
1023
1024 list_for_each_entry(ctl,
1025 &fs_info->caching_block_groups, list)
1026 if (ctl->block_group == block_group) {
1027 caching_ctl = ctl;
1028 refcount_inc(&caching_ctl->count);
1029 break;
1030 }
1031 }
1032 if (caching_ctl)
1033 list_del_init(&caching_ctl->list);
1034 up_write(&fs_info->commit_root_sem);
1035 if (caching_ctl) {
1036 /* Once for the caching bgs list and once for us. */
1037 btrfs_put_caching_control(caching_ctl);
1038 btrfs_put_caching_control(caching_ctl);
1039 }
1040 }
1041
1042 spin_lock(&trans->transaction->dirty_bgs_lock);
1043 WARN_ON(!list_empty(&block_group->dirty_list));
1044 WARN_ON(!list_empty(&block_group->io_list));
1045 spin_unlock(&trans->transaction->dirty_bgs_lock);
1046
1047 btrfs_remove_free_space_cache(block_group);
1048
1049 spin_lock(&block_group->space_info->lock);
1050 list_del_init(&block_group->ro_list);
1051
1052 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1053 WARN_ON(block_group->space_info->total_bytes
David Sterbab3470b52019-10-23 18:48:22 +02001054 < block_group->length);
Josef Bacike3e05202019-06-20 15:37:55 -04001055 WARN_ON(block_group->space_info->bytes_readonly
David Sterbab3470b52019-10-23 18:48:22 +02001056 < block_group->length);
Josef Bacike3e05202019-06-20 15:37:55 -04001057 WARN_ON(block_group->space_info->disk_total
David Sterbab3470b52019-10-23 18:48:22 +02001058 < block_group->length * factor);
Josef Bacike3e05202019-06-20 15:37:55 -04001059 }
David Sterbab3470b52019-10-23 18:48:22 +02001060 block_group->space_info->total_bytes -= block_group->length;
1061 block_group->space_info->bytes_readonly -= block_group->length;
1062 block_group->space_info->disk_total -= block_group->length * factor;
Josef Bacike3e05202019-06-20 15:37:55 -04001063
1064 spin_unlock(&block_group->space_info->lock);
1065
David Sterbab3470b52019-10-23 18:48:22 +02001066 key.objectid = block_group->start;
1067 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1068 key.offset = block_group->length;
Josef Bacike3e05202019-06-20 15:37:55 -04001069
1070 mutex_lock(&fs_info->chunk_mutex);
1071 spin_lock(&block_group->lock);
1072 block_group->removed = 1;
1073 /*
1074 * At this point trimming can't start on this block group, because we
1075 * removed the block group from the tree fs_info->block_group_cache_tree
 1076	 * so no one can find it anymore and even if someone already got this
1077 * block group before we removed it from the rbtree, they have already
1078 * incremented block_group->trimming - if they didn't, they won't find
1079 * any free space entries because we already removed them all when we
1080 * called btrfs_remove_free_space_cache().
1081 *
1082 * And we must not remove the extent map from the fs_info->mapping_tree
1083 * to prevent the same logical address range and physical device space
1084 * ranges from being reused for a new block group. This is because our
1085 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1086 * completely transactionless, so while it is trimming a range the
1087 * currently running transaction might finish and a new one start,
1088 * allowing for new block groups to be created that can reuse the same
1089 * physical device locations unless we take this special care.
1090 *
1091 * There may also be an implicit trim operation if the file system
1092 * is mounted with -odiscard. The same protections must remain
1093 * in place until the extents have been discarded completely when
1094 * the transaction commit has completed.
1095 */
1096 remove_em = (atomic_read(&block_group->trimming) == 0);
1097 spin_unlock(&block_group->lock);
1098
1099 mutex_unlock(&fs_info->chunk_mutex);
1100
1101 ret = remove_block_group_free_space(trans, block_group);
1102 if (ret)
1103 goto out;
1104
1105 btrfs_put_block_group(block_group);
1106 btrfs_put_block_group(block_group);
1107
1108 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1109 if (ret > 0)
1110 ret = -EIO;
1111 if (ret < 0)
1112 goto out;
1113
1114 ret = btrfs_del_item(trans, root, path);
1115 if (ret)
1116 goto out;
1117
1118 if (remove_em) {
1119 struct extent_map_tree *em_tree;
1120
1121 em_tree = &fs_info->mapping_tree;
1122 write_lock(&em_tree->lock);
1123 remove_extent_mapping(em_tree, em);
1124 write_unlock(&em_tree->lock);
1125 /* once for the tree */
1126 free_extent_map(em);
1127 }
1128out:
1129 if (remove_rsv)
1130 btrfs_delayed_refs_rsv_release(fs_info, 1);
1131 btrfs_free_path(path);
1132 return ret;
1133}
1134
1135struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1136 struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1137{
1138 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1139 struct extent_map *em;
1140 struct map_lookup *map;
1141 unsigned int num_items;
1142
1143 read_lock(&em_tree->lock);
1144 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1145 read_unlock(&em_tree->lock);
1146 ASSERT(em && em->start == chunk_offset);
1147
1148 /*
1149 * We need to reserve 3 + N units from the metadata space info in order
1150 * to remove a block group (done at btrfs_remove_chunk() and at
1151 * btrfs_remove_block_group()), which are used for:
1152 *
1153 * 1 unit for adding the free space inode's orphan (located in the tree
1154 * of tree roots).
1155 * 1 unit for deleting the block group item (located in the extent
1156 * tree).
1157 * 1 unit for deleting the free space item (located in tree of tree
1158 * roots).
1159 * N units for deleting N device extent items corresponding to each
1160 * stripe (located in the device tree).
1161 *
1162 * In order to remove a block group we also need to reserve units in the
1163 * system space info in order to update the chunk tree (update one or
1164 * more device items and remove one chunk item), but this is done at
1165 * btrfs_remove_chunk() through a call to check_system_chunk().
1166 */
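	/*
	 * Example (illustrative): removing a block group whose chunk has two
	 * stripes needs 3 + 2 = 5 reserved metadata units.
	 */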
1167 map = em->map_lookup;
1168 num_items = 3 + map->num_stripes;
1169 free_extent_map(em);
1170
1171 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
1172 num_items, 1);
1173}
1174
1175/*
Josef Bacik26ce2092019-06-20 15:37:59 -04001176 * Mark block group @cache read-only, so later write won't happen to block
1177 * group @cache.
1178 *
1179 * If @force is not set, this function will only mark the block group readonly
1180 * if we have enough free space (1M) in other metadata/system block groups.
 1181 * If @force is set, this function will mark the block group readonly
1182 * without checking free space.
1183 *
1184 * NOTE: This function doesn't care if other block groups can contain all the
1185 * data in this block group. That check should be done by relocation routine,
1186 * not this function.
1187 */
Josef Bacike11c0402019-06-20 15:38:07 -04001188static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
Josef Bacik26ce2092019-06-20 15:37:59 -04001189{
1190 struct btrfs_space_info *sinfo = cache->space_info;
1191 u64 num_bytes;
1192 u64 sinfo_used;
1193 u64 min_allocable_bytes;
1194 int ret = -ENOSPC;
1195
1196 /*
1197 * We need some metadata space and system metadata space for
 1198	 * allocating chunks in some corner cases, so keep a small (1M) buffer
 1199	 * unless we are forced to mark the block group read-only.
1200 */
1201 if ((sinfo->flags &
1202 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
1203 !force)
1204 min_allocable_bytes = SZ_1M;
1205 else
1206 min_allocable_bytes = 0;
1207
1208 spin_lock(&sinfo->lock);
1209 spin_lock(&cache->lock);
1210
1211 if (cache->ro) {
1212 cache->ro++;
1213 ret = 0;
1214 goto out;
1215 }
1216
David Sterbab3470b52019-10-23 18:48:22 +02001217 num_bytes = cache->length - cache->reserved - cache->pinned -
David Sterbabf38be62019-10-23 18:48:11 +02001218 cache->bytes_super - cache->used;
Josef Bacik26ce2092019-06-20 15:37:59 -04001219 sinfo_used = btrfs_space_info_used(sinfo, true);
1220
1221 /*
 1222	 * sinfo_used + num_bytes should always be <= sinfo->total_bytes.
1223 *
1224 * Here we make sure if we mark this bg RO, we still have enough
1225 * free space as buffer (if min_allocable_bytes is not 0).
1226 */
1227 if (sinfo_used + num_bytes + min_allocable_bytes <=
1228 sinfo->total_bytes) {
1229 sinfo->bytes_readonly += num_bytes;
1230 cache->ro++;
1231 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1232 ret = 0;
1233 }
1234out:
1235 spin_unlock(&cache->lock);
1236 spin_unlock(&sinfo->lock);
1237 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1238 btrfs_info(cache->fs_info,
David Sterbab3470b52019-10-23 18:48:22 +02001239 "unable to make block group %llu ro", cache->start);
Josef Bacik26ce2092019-06-20 15:37:59 -04001240 btrfs_info(cache->fs_info,
1241 "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
1242 sinfo_used, num_bytes, min_allocable_bytes);
1243 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1244 }
1245 return ret;
1246}
1247
1248/*
Josef Bacike3e05202019-06-20 15:37:55 -04001249 * Process the unused_bgs list and remove any that don't have any allocated
1250 * space inside of them.
1251 */
1252void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1253{
1254 struct btrfs_block_group_cache *block_group;
1255 struct btrfs_space_info *space_info;
1256 struct btrfs_trans_handle *trans;
1257 int ret = 0;
1258
1259 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1260 return;
1261
1262 spin_lock(&fs_info->unused_bgs_lock);
1263 while (!list_empty(&fs_info->unused_bgs)) {
1264 u64 start, end;
1265 int trimming;
1266
1267 block_group = list_first_entry(&fs_info->unused_bgs,
1268 struct btrfs_block_group_cache,
1269 bg_list);
1270 list_del_init(&block_group->bg_list);
1271
1272 space_info = block_group->space_info;
1273
1274 if (ret || btrfs_mixed_space_info(space_info)) {
1275 btrfs_put_block_group(block_group);
1276 continue;
1277 }
1278 spin_unlock(&fs_info->unused_bgs_lock);
1279
1280 mutex_lock(&fs_info->delete_unused_bgs_mutex);
1281
1282 /* Don't want to race with allocators so take the groups_sem */
1283 down_write(&space_info->groups_sem);
1284 spin_lock(&block_group->lock);
1285 if (block_group->reserved || block_group->pinned ||
David Sterbabf38be62019-10-23 18:48:11 +02001286 block_group->used || block_group->ro ||
Josef Bacike3e05202019-06-20 15:37:55 -04001287 list_is_singular(&block_group->list)) {
1288 /*
1289 * We want to bail if we made new allocations or have
1290 * outstanding allocations in this block group. We do
1291 * the ro check in case balance is currently acting on
1292 * this block group.
1293 */
1294 trace_btrfs_skip_unused_block_group(block_group);
1295 spin_unlock(&block_group->lock);
1296 up_write(&space_info->groups_sem);
1297 goto next;
1298 }
1299 spin_unlock(&block_group->lock);
1300
1301 /* We don't want to force the issue, only flip if it's ok. */
Josef Bacike11c0402019-06-20 15:38:07 -04001302 ret = inc_block_group_ro(block_group, 0);
Josef Bacike3e05202019-06-20 15:37:55 -04001303 up_write(&space_info->groups_sem);
1304 if (ret < 0) {
1305 ret = 0;
1306 goto next;
1307 }
1308
1309 /*
1310 * Want to do this before we do anything else so we can recover
1311 * properly if we fail to join the transaction.
1312 */
1313 trans = btrfs_start_trans_remove_block_group(fs_info,
David Sterbab3470b52019-10-23 18:48:22 +02001314 block_group->start);
Josef Bacike3e05202019-06-20 15:37:55 -04001315 if (IS_ERR(trans)) {
1316 btrfs_dec_block_group_ro(block_group);
1317 ret = PTR_ERR(trans);
1318 goto next;
1319 }
1320
1321 /*
1322 * We could have pending pinned extents for this block group,
1323 * just delete them, we don't care about them anymore.
1324 */
David Sterbab3470b52019-10-23 18:48:22 +02001325 start = block_group->start;
1326 end = start + block_group->length - 1;
Josef Bacike3e05202019-06-20 15:37:55 -04001327 /*
1328 * Hold the unused_bg_unpin_mutex lock to avoid racing with
1329 * btrfs_finish_extent_commit(). If we are at transaction N,
1330 * another task might be running finish_extent_commit() for the
1331 * previous transaction N - 1, and have seen a range belonging
1332 * to the block group in freed_extents[] before we were able to
1333 * clear the whole block group range from freed_extents[]. This
 1334		 * means that task can look up the block group after we
1335 * unpinned it from freed_extents[] and removed it, leading to
1336 * a BUG_ON() at btrfs_unpin_extent_range().
1337 */
1338 mutex_lock(&fs_info->unused_bg_unpin_mutex);
1339 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
1340 EXTENT_DIRTY);
1341 if (ret) {
1342 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1343 btrfs_dec_block_group_ro(block_group);
1344 goto end_trans;
1345 }
1346 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
1347 EXTENT_DIRTY);
1348 if (ret) {
1349 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1350 btrfs_dec_block_group_ro(block_group);
1351 goto end_trans;
1352 }
1353 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1354
1355 /* Reset pinned so btrfs_put_block_group doesn't complain */
1356 spin_lock(&space_info->lock);
1357 spin_lock(&block_group->lock);
1358
1359 btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1360 -block_group->pinned);
1361 space_info->bytes_readonly += block_group->pinned;
1362 percpu_counter_add_batch(&space_info->total_bytes_pinned,
1363 -block_group->pinned,
1364 BTRFS_TOTAL_BYTES_PINNED_BATCH);
1365 block_group->pinned = 0;
1366
1367 spin_unlock(&block_group->lock);
1368 spin_unlock(&space_info->lock);
1369
1370 /* DISCARD can flip during remount */
1371 trimming = btrfs_test_opt(fs_info, DISCARD);
1372
1373 /* Implicit trim during transaction commit. */
1374 if (trimming)
1375 btrfs_get_block_group_trimming(block_group);
1376
1377 /*
1378 * Btrfs_remove_chunk will abort the transaction if things go
1379 * horribly wrong.
1380 */
David Sterbab3470b52019-10-23 18:48:22 +02001381 ret = btrfs_remove_chunk(trans, block_group->start);
Josef Bacike3e05202019-06-20 15:37:55 -04001382
1383 if (ret) {
1384 if (trimming)
1385 btrfs_put_block_group_trimming(block_group);
1386 goto end_trans;
1387 }
1388
1389 /*
1390 * If we're not mounted with -odiscard, we can just forget
1391 * about this block group. Otherwise we'll need to wait
1392 * until transaction commit to do the actual discard.
1393 */
1394 if (trimming) {
1395 spin_lock(&fs_info->unused_bgs_lock);
1396 /*
1397 * A concurrent scrub might have added us to the list
1398 * fs_info->unused_bgs, so use a list_move operation
1399 * to add the block group to the deleted_bgs list.
1400 */
1401 list_move(&block_group->bg_list,
1402 &trans->transaction->deleted_bgs);
1403 spin_unlock(&fs_info->unused_bgs_lock);
1404 btrfs_get_block_group(block_group);
1405 }
1406end_trans:
1407 btrfs_end_transaction(trans);
1408next:
1409 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
1410 btrfs_put_block_group(block_group);
1411 spin_lock(&fs_info->unused_bgs_lock);
1412 }
1413 spin_unlock(&fs_info->unused_bgs_lock);
1414}
1415
1416void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
1417{
1418 struct btrfs_fs_info *fs_info = bg->fs_info;
1419
1420 spin_lock(&fs_info->unused_bgs_lock);
1421 if (list_empty(&bg->bg_list)) {
1422 btrfs_get_block_group(bg);
1423 trace_btrfs_add_unused_block_group(bg);
1424 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1425 }
1426 spin_unlock(&fs_info->unused_bgs_lock);
1427}
Josef Bacik4358d9632019-06-20 15:37:57 -04001428
1429static int find_first_block_group(struct btrfs_fs_info *fs_info,
1430 struct btrfs_path *path,
1431 struct btrfs_key *key)
1432{
1433 struct btrfs_root *root = fs_info->extent_root;
1434 int ret = 0;
1435 struct btrfs_key found_key;
1436 struct extent_buffer *leaf;
1437 struct btrfs_block_group_item bg;
1438 u64 flags;
1439 int slot;
1440
1441 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1442 if (ret < 0)
1443 goto out;
1444
1445 while (1) {
1446 slot = path->slots[0];
1447 leaf = path->nodes[0];
1448 if (slot >= btrfs_header_nritems(leaf)) {
1449 ret = btrfs_next_leaf(root, path);
1450 if (ret == 0)
1451 continue;
1452 if (ret < 0)
1453 goto out;
1454 break;
1455 }
1456 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1457
1458 if (found_key.objectid >= key->objectid &&
1459 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1460 struct extent_map_tree *em_tree;
1461 struct extent_map *em;
1462
1463 em_tree = &root->fs_info->mapping_tree;
1464 read_lock(&em_tree->lock);
1465 em = lookup_extent_mapping(em_tree, found_key.objectid,
1466 found_key.offset);
1467 read_unlock(&em_tree->lock);
1468 if (!em) {
1469 btrfs_err(fs_info,
1470 "logical %llu len %llu found bg but no related chunk",
1471 found_key.objectid, found_key.offset);
1472 ret = -ENOENT;
1473 } else if (em->start != found_key.objectid ||
1474 em->len != found_key.offset) {
1475 btrfs_err(fs_info,
1476 "block group %llu len %llu mismatch with chunk %llu len %llu",
1477 found_key.objectid, found_key.offset,
1478 em->start, em->len);
1479 ret = -EUCLEAN;
1480 } else {
1481 read_extent_buffer(leaf, &bg,
1482 btrfs_item_ptr_offset(leaf, slot),
1483 sizeof(bg));
David Sterbade0dc452019-10-23 18:48:18 +02001484 flags = btrfs_stack_block_group_flags(&bg) &
Josef Bacik4358d9632019-06-20 15:37:57 -04001485 BTRFS_BLOCK_GROUP_TYPE_MASK;
1486
1487 if (flags != (em->map_lookup->type &
1488 BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1489 btrfs_err(fs_info,
1490"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1491 found_key.objectid,
1492 found_key.offset, flags,
1493 (BTRFS_BLOCK_GROUP_TYPE_MASK &
1494 em->map_lookup->type));
1495 ret = -EUCLEAN;
1496 } else {
1497 ret = 0;
1498 }
1499 }
1500 free_extent_map(em);
1501 goto out;
1502 }
1503 path->slots[0]++;
1504 }
1505out:
1506 return ret;
1507}
1508
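/* Record the extended profile bits of @flags in the per-type avail_*_alloc_bits. */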
1509static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1510{
1511 u64 extra_flags = chunk_to_extended(flags) &
1512 BTRFS_EXTENDED_PROFILE_MASK;
1513
1514 write_seqlock(&fs_info->profiles_lock);
1515 if (flags & BTRFS_BLOCK_GROUP_DATA)
1516 fs_info->avail_data_alloc_bits |= extra_flags;
1517 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1518 fs_info->avail_metadata_alloc_bits |= extra_flags;
1519 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1520 fs_info->avail_system_alloc_bits |= extra_flags;
1521 write_sequnlock(&fs_info->profiles_lock);
1522}
1523
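/*
 * Account the superblock mirrors that land inside this block group: their
 * ranges are added to cache->bytes_super and marked as excluded so the
 * caching code never exposes them as free space.
 */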
1524static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
1525{
1526 struct btrfs_fs_info *fs_info = cache->fs_info;
1527 u64 bytenr;
1528 u64 *logical;
1529 int stripe_len;
1530 int i, nr, ret;
1531
David Sterbab3470b52019-10-23 18:48:22 +02001532 if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
1533 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
Josef Bacik4358d9632019-06-20 15:37:57 -04001534 cache->bytes_super += stripe_len;
David Sterbab3470b52019-10-23 18:48:22 +02001535 ret = btrfs_add_excluded_extent(fs_info, cache->start,
Josef Bacik4358d9632019-06-20 15:37:57 -04001536 stripe_len);
1537 if (ret)
1538 return ret;
1539 }
1540
1541 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1542 bytenr = btrfs_sb_offset(i);
David Sterbab3470b52019-10-23 18:48:22 +02001543 ret = btrfs_rmap_block(fs_info, cache->start,
Josef Bacik4358d9632019-06-20 15:37:57 -04001544 bytenr, &logical, &nr, &stripe_len);
1545 if (ret)
1546 return ret;
1547
1548 while (nr--) {
1549 u64 start, len;
1550
David Sterbab3470b52019-10-23 18:48:22 +02001551 if (logical[nr] > cache->start + cache->length)
Josef Bacik4358d9632019-06-20 15:37:57 -04001552 continue;
1553
David Sterbab3470b52019-10-23 18:48:22 +02001554 if (logical[nr] + stripe_len <= cache->start)
Josef Bacik4358d9632019-06-20 15:37:57 -04001555 continue;
1556
1557 start = logical[nr];
David Sterbab3470b52019-10-23 18:48:22 +02001558 if (start < cache->start) {
1559 start = cache->start;
Josef Bacik4358d9632019-06-20 15:37:57 -04001560 len = (logical[nr] + stripe_len) - start;
1561 } else {
1562 len = min_t(u64, stripe_len,
David Sterbab3470b52019-10-23 18:48:22 +02001563 cache->start + cache->length - start);
Josef Bacik4358d9632019-06-20 15:37:57 -04001564 }
1565
1566 cache->bytes_super += len;
1567 ret = btrfs_add_excluded_extent(fs_info, start, len);
1568 if (ret) {
1569 kfree(logical);
1570 return ret;
1571 }
1572 }
1573
1574 kfree(logical);
1575 }
1576 return 0;
1577}
1578
1579static void link_block_group(struct btrfs_block_group_cache *cache)
1580{
1581 struct btrfs_space_info *space_info = cache->space_info;
1582 int index = btrfs_bg_flags_to_raid_index(cache->flags);
1583 bool first = false;
1584
1585 down_write(&space_info->groups_sem);
1586 if (list_empty(&space_info->block_groups[index]))
1587 first = true;
1588 list_add_tail(&cache->list, &space_info->block_groups[index]);
1589 up_write(&space_info->groups_sem);
1590
1591 if (first)
1592 btrfs_sysfs_add_block_group_type(cache);
1593}
1594
1595static struct btrfs_block_group_cache *btrfs_create_block_group_cache(
1596 struct btrfs_fs_info *fs_info, u64 start, u64 size)
1597{
1598 struct btrfs_block_group_cache *cache;
1599
1600 cache = kzalloc(sizeof(*cache), GFP_NOFS);
1601 if (!cache)
1602 return NULL;
1603
1604 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1605 GFP_NOFS);
1606 if (!cache->free_space_ctl) {
1607 kfree(cache);
1608 return NULL;
1609 }
1610
David Sterbab3470b52019-10-23 18:48:22 +02001611 cache->start = start;
1612 cache->length = size;
Josef Bacik4358d9632019-06-20 15:37:57 -04001613
1614 cache->fs_info = fs_info;
1615 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1616 set_free_space_tree_thresholds(cache);
1617
1618 atomic_set(&cache->count, 1);
1619 spin_lock_init(&cache->lock);
1620 init_rwsem(&cache->data_rwsem);
1621 INIT_LIST_HEAD(&cache->list);
1622 INIT_LIST_HEAD(&cache->cluster_list);
1623 INIT_LIST_HEAD(&cache->bg_list);
1624 INIT_LIST_HEAD(&cache->ro_list);
1625 INIT_LIST_HEAD(&cache->dirty_list);
1626 INIT_LIST_HEAD(&cache->io_list);
1627 btrfs_init_free_space_ctl(cache);
1628 atomic_set(&cache->trimming, 0);
1629 mutex_init(&cache->free_space_lock);
1630 btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1631
1632 return cache;
1633}
1634
1635/*
1636 * Iterate all chunks and verify that each of them has the corresponding block
1637 * group
1638 */
1639static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
1640{
1641 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
1642 struct extent_map *em;
1643 struct btrfs_block_group_cache *bg;
1644 u64 start = 0;
1645 int ret = 0;
1646
1647 while (1) {
1648 read_lock(&map_tree->lock);
1649 /*
1650 * lookup_extent_mapping will return the first extent map
1651 * intersecting the range, so setting @len to 1 is enough to
1652 * get the first chunk.
1653 */
1654 em = lookup_extent_mapping(map_tree, start, 1);
1655 read_unlock(&map_tree->lock);
1656 if (!em)
1657 break;
1658
1659 bg = btrfs_lookup_block_group(fs_info, em->start);
1660 if (!bg) {
1661 btrfs_err(fs_info,
1662 "chunk start=%llu len=%llu doesn't have corresponding block group",
1663 em->start, em->len);
1664 ret = -EUCLEAN;
1665 free_extent_map(em);
1666 break;
1667 }
David Sterbab3470b52019-10-23 18:48:22 +02001668 if (bg->start != em->start || bg->length != em->len ||
Josef Bacik4358d9632019-06-20 15:37:57 -04001669 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
1670 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1671 btrfs_err(fs_info,
1672"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
1673 em->start, em->len,
1674 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
David Sterbab3470b52019-10-23 18:48:22 +02001675 bg->start, bg->length,
Josef Bacik4358d9632019-06-20 15:37:57 -04001676 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
1677 ret = -EUCLEAN;
1678 free_extent_map(em);
1679 btrfs_put_block_group(bg);
1680 break;
1681 }
1682 start = em->start + em->len;
1683 free_extent_map(em);
1684 btrfs_put_block_group(bg);
1685 }
1686 return ret;
1687}
1688
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001689static int read_one_block_group(struct btrfs_fs_info *info,
1690 struct btrfs_path *path,
Qu Wenruod49a2dd2019-11-05 09:35:35 +08001691 const struct btrfs_key *key,
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001692 int need_clear)
1693{
1694 struct extent_buffer *leaf = path->nodes[0];
1695 struct btrfs_block_group_cache *cache;
1696 struct btrfs_space_info *space_info;
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001697 struct btrfs_block_group_item bgi;
1698 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
1699 int slot = path->slots[0];
1700 int ret;
1701
Qu Wenruod49a2dd2019-11-05 09:35:35 +08001702 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001703
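	/*
	 * A BLOCK_GROUP_ITEM key describes the block group itself: objectid is
	 * its logical start and offset is its length, hence the start/size
	 * arguments below.
	 */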
Qu Wenruod49a2dd2019-11-05 09:35:35 +08001704 cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001705 if (!cache)
1706 return -ENOMEM;
1707
1708 if (need_clear) {
1709 /*
1710 * When we mount with old space cache, we need to
 1711		 * set BTRFS_DC_CLEAR and set the dirty flag.
1712 *
1713 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
1714 * truncate the old free space cache inode and
1715 * setup a new one.
1716 * b) Setting 'dirty flag' makes sure that we flush
1717 * the new space cache info onto disk.
1718 */
1719 if (btrfs_test_opt(info, SPACE_CACHE))
1720 cache->disk_cache_state = BTRFS_DC_CLEAR;
1721 }
1722 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
1723 sizeof(bgi));
1724 cache->used = btrfs_stack_block_group_used(&bgi);
1725 cache->flags = btrfs_stack_block_group_flags(&bgi);
1726 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1727 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1728 btrfs_err(info,
1729"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1730 cache->start);
1731 ret = -EINVAL;
1732 goto error;
1733 }
1734
1735 /*
1736 * We need to exclude the super stripes now so that the space info has
1737 * super bytes accounted for, otherwise we'll think we have more space
1738 * than we actually do.
1739 */
1740 ret = exclude_super_stripes(cache);
1741 if (ret) {
1742 /* We may have excluded something, so call this just in case. */
1743 btrfs_free_excluded_extents(cache);
1744 goto error;
1745 }
1746
1747 /*
1748 * Check for two cases, either we are full, and therefore don't need
1749 * to bother with the caching work since we won't find any space, or we
1750 * are empty, and we can just add all the space in and be done with it.
1751 * This saves us _a_lot_ of time, particularly in the full case.
1752 */
Qu Wenruod49a2dd2019-11-05 09:35:35 +08001753 if (key->offset == cache->used) {
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001754 cache->last_byte_to_unpin = (u64)-1;
1755 cache->cached = BTRFS_CACHE_FINISHED;
1756 btrfs_free_excluded_extents(cache);
1757 } else if (cache->used == 0) {
1758 cache->last_byte_to_unpin = (u64)-1;
1759 cache->cached = BTRFS_CACHE_FINISHED;
Qu Wenruod49a2dd2019-11-05 09:35:35 +08001760 add_new_free_space(cache, key->objectid,
1761 key->objectid + key->offset);
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001762 btrfs_free_excluded_extents(cache);
1763 }
1764
1765 ret = btrfs_add_block_group_cache(info, cache);
1766 if (ret) {
1767 btrfs_remove_free_space_cache(cache);
1768 goto error;
1769 }
1770 trace_btrfs_add_block_group(info, cache, 0);
Qu Wenruod49a2dd2019-11-05 09:35:35 +08001771 btrfs_update_space_info(info, cache->flags, key->offset,
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001772 cache->used, cache->bytes_super, &space_info);
1773
1774 cache->space_info = space_info;
1775
1776 link_block_group(cache);
1777
1778 set_avail_alloc_bits(info, cache->flags);
1779 if (btrfs_chunk_readonly(info, cache->start)) {
1780 inc_block_group_ro(cache, 1);
1781 } else if (cache->used == 0) {
1782 ASSERT(list_empty(&cache->bg_list));
1783 btrfs_mark_bg_unused(cache);
1784 }
1785 return 0;
1786error:
1787 btrfs_put_block_group(cache);
1788 return ret;
1789}
1790
Josef Bacik4358d9632019-06-20 15:37:57 -04001791int btrfs_read_block_groups(struct btrfs_fs_info *info)
1792{
1793 struct btrfs_path *path;
1794 int ret;
1795 struct btrfs_block_group_cache *cache;
1796 struct btrfs_space_info *space_info;
1797 struct btrfs_key key;
Josef Bacik4358d9632019-06-20 15:37:57 -04001798 int need_clear = 0;
1799 u64 cache_gen;
Josef Bacik4358d9632019-06-20 15:37:57 -04001800
1801 key.objectid = 0;
1802 key.offset = 0;
1803 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1804 path = btrfs_alloc_path();
1805 if (!path)
1806 return -ENOMEM;
1807 path->reada = READA_FORWARD;
1808
1809 cache_gen = btrfs_super_cache_generation(info->super_copy);
1810 if (btrfs_test_opt(info, SPACE_CACHE) &&
1811 btrfs_super_generation(info->super_copy) != cache_gen)
1812 need_clear = 1;
1813 if (btrfs_test_opt(info, CLEAR_CACHE))
1814 need_clear = 1;
1815
1816 while (1) {
1817 ret = find_first_block_group(info, path, &key);
1818 if (ret > 0)
1819 break;
1820 if (ret != 0)
1821 goto error;
1822
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001823 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
Qu Wenruod49a2dd2019-11-05 09:35:35 +08001824 ret = read_one_block_group(info, path, &key, need_clear);
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001825 if (ret < 0)
Josef Bacik4358d9632019-06-20 15:37:57 -04001826 goto error;
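		/*
		 * Advance past this block group (objectid is its start, offset
		 * its length) so the next search finds the following item.
		 */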
Qu Wenruoffb9e0f2019-10-10 10:39:27 +08001827 key.objectid += key.offset;
1828 key.offset = 0;
Josef Bacik4358d9632019-06-20 15:37:57 -04001829 btrfs_release_path(path);
Josef Bacik4358d9632019-06-20 15:37:57 -04001830 }
1831
1832 list_for_each_entry_rcu(space_info, &info->space_info, list) {
1833 if (!(btrfs_get_alloc_profile(info, space_info->flags) &
1834 (BTRFS_BLOCK_GROUP_RAID10 |
1835 BTRFS_BLOCK_GROUP_RAID1_MASK |
1836 BTRFS_BLOCK_GROUP_RAID56_MASK |
1837 BTRFS_BLOCK_GROUP_DUP)))
1838 continue;
1839 /*
1840 * Avoid allocating from un-mirrored block group if there are
1841 * mirrored block groups.
1842 */
1843 list_for_each_entry(cache,
1844 &space_info->block_groups[BTRFS_RAID_RAID0],
1845 list)
Josef Bacike11c0402019-06-20 15:38:07 -04001846 inc_block_group_ro(cache, 1);
Josef Bacik4358d9632019-06-20 15:37:57 -04001847 list_for_each_entry(cache,
1848 &space_info->block_groups[BTRFS_RAID_SINGLE],
1849 list)
Josef Bacike11c0402019-06-20 15:38:07 -04001850 inc_block_group_ro(cache, 1);
Josef Bacik4358d9632019-06-20 15:37:57 -04001851 }
1852
1853 btrfs_init_global_block_rsv(info);
1854 ret = check_chunk_block_group_mappings(info);
1855error:
1856 btrfs_free_path(path);
1857 return ret;
1858}
1859
1860void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
1861{
1862 struct btrfs_fs_info *fs_info = trans->fs_info;
1863 struct btrfs_block_group_cache *block_group;
1864 struct btrfs_root *extent_root = fs_info->extent_root;
1865 struct btrfs_block_group_item item;
1866 struct btrfs_key key;
1867 int ret = 0;
1868
1869 if (!trans->can_flush_pending_bgs)
1870 return;
1871
1872 while (!list_empty(&trans->new_bgs)) {
1873 block_group = list_first_entry(&trans->new_bgs,
1874 struct btrfs_block_group_cache,
1875 bg_list);
1876 if (ret)
1877 goto next;
1878
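		/*
		 * Snapshot the item fields under the lock; the key mirrors the
		 * on-disk layout: (start, BLOCK_GROUP_ITEM_KEY, length).
		 */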
1879 spin_lock(&block_group->lock);
David Sterbade0dc452019-10-23 18:48:18 +02001880 btrfs_set_stack_block_group_used(&item, block_group->used);
1881 btrfs_set_stack_block_group_chunk_objectid(&item,
David Sterba3d976382019-10-23 18:48:15 +02001882 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
David Sterbade0dc452019-10-23 18:48:18 +02001883 btrfs_set_stack_block_group_flags(&item, block_group->flags);
David Sterbab3470b52019-10-23 18:48:22 +02001884 key.objectid = block_group->start;
1885 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1886 key.offset = block_group->length;
Josef Bacik4358d9632019-06-20 15:37:57 -04001887 spin_unlock(&block_group->lock);
1888
1889 ret = btrfs_insert_item(trans, extent_root, &key, &item,
1890 sizeof(item));
1891 if (ret)
1892 btrfs_abort_transaction(trans, ret);
1893 ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
1894 if (ret)
1895 btrfs_abort_transaction(trans, ret);
1896 add_block_group_free_space(trans, block_group);
1897 /* Already aborted the transaction if it failed. */
1898next:
1899 btrfs_delayed_refs_rsv_release(fs_info, 1);
1900 list_del_init(&block_group->bg_list);
1901 }
1902 btrfs_trans_release_chunk_metadata(trans);
1903}
1904
1905int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
1906 u64 type, u64 chunk_offset, u64 size)
1907{
1908 struct btrfs_fs_info *fs_info = trans->fs_info;
1909 struct btrfs_block_group_cache *cache;
1910 int ret;
1911
1912 btrfs_set_log_full_commit(trans);
1913
1914 cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
1915 if (!cache)
1916 return -ENOMEM;
1917
David Sterbabf38be62019-10-23 18:48:11 +02001918 cache->used = bytes_used;
Josef Bacik4358d9632019-06-20 15:37:57 -04001919 cache->flags = type;
1920 cache->last_byte_to_unpin = (u64)-1;
1921 cache->cached = BTRFS_CACHE_FINISHED;
1922 cache->needs_free_space = 1;
1923 ret = exclude_super_stripes(cache);
1924 if (ret) {
1925 /* We may have excluded something, so call this just in case */
1926 btrfs_free_excluded_extents(cache);
1927 btrfs_put_block_group(cache);
1928 return ret;
1929 }
1930
1931 add_new_free_space(cache, chunk_offset, chunk_offset + size);
1932
1933 btrfs_free_excluded_extents(cache);
1934
1935#ifdef CONFIG_BTRFS_DEBUG
1936 if (btrfs_should_fragment_free_space(cache)) {
1937 u64 new_bytes_used = size - bytes_used;
1938
1939 bytes_used += new_bytes_used >> 1;
Josef Bacike11c0402019-06-20 15:38:07 -04001940 fragment_free_space(cache);
Josef Bacik4358d9632019-06-20 15:37:57 -04001941 }
1942#endif
1943 /*
1944 * Ensure the corresponding space_info object is created and
1945 * assigned to our block group. We want our bg to be added to the rbtree
1946 * with its ->space_info set.
1947 */
1948 cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
1949 ASSERT(cache->space_info);
1950
1951 ret = btrfs_add_block_group_cache(fs_info, cache);
1952 if (ret) {
1953 btrfs_remove_free_space_cache(cache);
1954 btrfs_put_block_group(cache);
1955 return ret;
1956 }
1957
1958 /*
1959 * Now that our block group has its ->space_info set and is inserted in
1960 * the rbtree, update the space info's counters.
1961 */
1962 trace_btrfs_add_block_group(fs_info, cache, 1);
1963 btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
1964 cache->bytes_super, &cache->space_info);
1965 btrfs_update_global_block_rsv(fs_info);
1966
1967 link_block_group(cache);
1968
1969 list_add_tail(&cache->bg_list, &trans->new_bgs);
1970 trans->delayed_ref_updates++;
1971 btrfs_update_delayed_refs_rsv(trans);
1972
1973 set_avail_alloc_bits(fs_info, type);
1974 return 0;
1975}
Josef Bacik26ce2092019-06-20 15:37:59 -04001976
1977static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
1978{
1979 u64 num_devices;
1980 u64 stripped;
1981
1982 /*
 1983	 * If restripe for this chunk_type is on, pick the target profile and
 1984	 * return, otherwise do the usual balance
1985 */
Josef Bacike11c0402019-06-20 15:38:07 -04001986 stripped = get_restripe_target(fs_info, flags);
Josef Bacik26ce2092019-06-20 15:37:59 -04001987 if (stripped)
1988 return extended_to_chunk(stripped);
1989
1990 num_devices = fs_info->fs_devices->rw_devices;
1991
1992 stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
1993 BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
1994
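	/*
	 * Rough effect of the conversions below: on a single-device fs, RAID0
	 * drops to single and RAID1/RAID10 drop to DUP; with multiple devices,
	 * DUP is promoted to RAID1.
	 */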
1995 if (num_devices == 1) {
1996 stripped |= BTRFS_BLOCK_GROUP_DUP;
1997 stripped = flags & ~stripped;
1998
1999 /* turn raid0 into single device chunks */
2000 if (flags & BTRFS_BLOCK_GROUP_RAID0)
2001 return stripped;
2002
2003 /* turn mirroring into duplication */
2004 if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
2005 BTRFS_BLOCK_GROUP_RAID10))
2006 return stripped | BTRFS_BLOCK_GROUP_DUP;
2007 } else {
2008 /* they already had raid on here, just return */
2009 if (flags & stripped)
2010 return flags;
2011
2012 stripped |= BTRFS_BLOCK_GROUP_DUP;
2013 stripped = flags & ~stripped;
2014
2015 /* switch duplicated blocks with raid1 */
2016 if (flags & BTRFS_BLOCK_GROUP_DUP)
2017 return stripped | BTRFS_BLOCK_GROUP_RAID1;
2018
2019 /* this is drive concat, leave it alone */
2020 }
2021
2022 return flags;
2023}
2024
2025int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
 2027{
2028 struct btrfs_fs_info *fs_info = cache->fs_info;
2029 struct btrfs_trans_handle *trans;
2030 u64 alloc_flags;
2031 int ret;
2032
2033again:
2034 trans = btrfs_join_transaction(fs_info->extent_root);
2035 if (IS_ERR(trans))
2036 return PTR_ERR(trans);
2037
2038 /*
2039 * we're not allowed to set block groups readonly after the dirty
2040 * block groups cache has started writing. If it already started,
2041 * back off and let this transaction commit
2042 */
2043 mutex_lock(&fs_info->ro_block_group_mutex);
2044 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2045 u64 transid = trans->transid;
2046
2047 mutex_unlock(&fs_info->ro_block_group_mutex);
2048 btrfs_end_transaction(trans);
2049
2050 ret = btrfs_wait_for_commit(fs_info, transid);
2051 if (ret)
2052 return ret;
2053 goto again;
2054 }
2055
2056 /*
2057 * if we are changing raid levels, try to allocate a corresponding
2058 * block group with the new raid level.
2059 */
2060 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2061 if (alloc_flags != cache->flags) {
2062 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2063 /*
2064 * ENOSPC is allowed here, we may have enough space
2065 * already allocated at the new raid level to
2066 * carry on
2067 */
2068 if (ret == -ENOSPC)
2069 ret = 0;
2070 if (ret < 0)
2071 goto out;
2072 }
2073
Josef Bacike11c0402019-06-20 15:38:07 -04002074 ret = inc_block_group_ro(cache, 0);
Josef Bacik26ce2092019-06-20 15:37:59 -04002075 if (!ret)
2076 goto out;
2077 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2078 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2079 if (ret < 0)
2080 goto out;
Josef Bacike11c0402019-06-20 15:38:07 -04002081 ret = inc_block_group_ro(cache, 0);
Josef Bacik26ce2092019-06-20 15:37:59 -04002082out:
2083 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2084 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2085 mutex_lock(&fs_info->chunk_mutex);
2086 check_system_chunk(trans, alloc_flags);
2087 mutex_unlock(&fs_info->chunk_mutex);
2088 }
2089 mutex_unlock(&fs_info->ro_block_group_mutex);
2090
2091 btrfs_end_transaction(trans);
2092 return ret;
2093}
2094
2095void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
2096{
2097 struct btrfs_space_info *sinfo = cache->space_info;
2098 u64 num_bytes;
2099
2100 BUG_ON(!cache->ro);
2101
2102 spin_lock(&sinfo->lock);
2103 spin_lock(&cache->lock);
2104 if (!--cache->ro) {
David Sterbab3470b52019-10-23 18:48:22 +02002105 num_bytes = cache->length - cache->reserved -
David Sterbabf38be62019-10-23 18:48:11 +02002106 cache->pinned - cache->bytes_super - cache->used;
Josef Bacik26ce2092019-06-20 15:37:59 -04002107 sinfo->bytes_readonly -= num_bytes;
2108 list_del_init(&cache->ro_list);
2109 }
2110 spin_unlock(&cache->lock);
2111 spin_unlock(&sinfo->lock);
2112}
Josef Bacik77745c02019-06-20 15:38:00 -04002113
2114static int write_one_cache_group(struct btrfs_trans_handle *trans,
2115 struct btrfs_path *path,
2116 struct btrfs_block_group_cache *cache)
2117{
2118 struct btrfs_fs_info *fs_info = trans->fs_info;
2119 int ret;
2120 struct btrfs_root *extent_root = fs_info->extent_root;
2121 unsigned long bi;
2122 struct extent_buffer *leaf;
David Sterbabf38be62019-10-23 18:48:11 +02002123 struct btrfs_block_group_item bgi;
David Sterbab3470b52019-10-23 18:48:22 +02002124 struct btrfs_key key;
Josef Bacik77745c02019-06-20 15:38:00 -04002125
David Sterbab3470b52019-10-23 18:48:22 +02002126 key.objectid = cache->start;
2127 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2128 key.offset = cache->length;
2129
2130 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
Josef Bacik77745c02019-06-20 15:38:00 -04002131 if (ret) {
2132 if (ret > 0)
2133 ret = -ENOENT;
2134 goto fail;
2135 }
2136
2137 leaf = path->nodes[0];
2138 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
David Sterbade0dc452019-10-23 18:48:18 +02002139 btrfs_set_stack_block_group_used(&bgi, cache->used);
2140 btrfs_set_stack_block_group_chunk_objectid(&bgi,
David Sterba3d976382019-10-23 18:48:15 +02002141 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
David Sterbade0dc452019-10-23 18:48:18 +02002142 btrfs_set_stack_block_group_flags(&bgi, cache->flags);
David Sterbabf38be62019-10-23 18:48:11 +02002143 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
Josef Bacik77745c02019-06-20 15:38:00 -04002144 btrfs_mark_buffer_dirty(leaf);
2145fail:
2146 btrfs_release_path(path);
2147 return ret;
 2149}
2150
2151static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2152 struct btrfs_trans_handle *trans,
2153 struct btrfs_path *path)
2154{
2155 struct btrfs_fs_info *fs_info = block_group->fs_info;
2156 struct btrfs_root *root = fs_info->tree_root;
2157 struct inode *inode = NULL;
2158 struct extent_changeset *data_reserved = NULL;
2159 u64 alloc_hint = 0;
2160 int dcs = BTRFS_DC_ERROR;
2161 u64 num_pages = 0;
2162 int retries = 0;
2163 int ret = 0;
2164
2165 /*
2166 * If this block group is smaller than 100 megs don't bother caching the
2167 * block group.
2168 */
David Sterbab3470b52019-10-23 18:48:22 +02002169 if (block_group->length < (100 * SZ_1M)) {
Josef Bacik77745c02019-06-20 15:38:00 -04002170 spin_lock(&block_group->lock);
2171 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2172 spin_unlock(&block_group->lock);
2173 return 0;
2174 }
2175
2176 if (trans->aborted)
2177 return 0;
2178again:
2179 inode = lookup_free_space_inode(block_group, path);
2180 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2181 ret = PTR_ERR(inode);
2182 btrfs_release_path(path);
2183 goto out;
2184 }
2185
2186 if (IS_ERR(inode)) {
2187 BUG_ON(retries);
2188 retries++;
2189
2190 if (block_group->ro)
2191 goto out_free;
2192
2193 ret = create_free_space_inode(trans, block_group, path);
2194 if (ret)
2195 goto out_free;
2196 goto again;
2197 }
2198
2199 /*
2200 * We want to set the generation to 0, that way if anything goes wrong
2201 * from here on out we know not to trust this cache when we load up next
2202 * time.
2203 */
2204 BTRFS_I(inode)->generation = 0;
2205 ret = btrfs_update_inode(trans, root, inode);
2206 if (ret) {
2207 /*
2208 * So theoretically we could recover from this, simply set the
2209 * super cache generation to 0 so we know to invalidate the
2210 * cache, but then we'd have to keep track of the block groups
2211 * that fail this way so we know we _have_ to reset this cache
2212 * before the next commit or risk reading stale cache. So to
 2213	 * limit our exposure to horrible edge cases, let's just abort the
2214 * transaction, this only happens in really bad situations
2215 * anyway.
2216 */
2217 btrfs_abort_transaction(trans, ret);
2218 goto out_put;
2219 }
2220 WARN_ON(ret);
2221
2222 /* We've already setup this transaction, go ahead and exit */
2223 if (block_group->cache_generation == trans->transid &&
2224 i_size_read(inode)) {
2225 dcs = BTRFS_DC_SETUP;
2226 goto out_put;
2227 }
2228
2229 if (i_size_read(inode) > 0) {
2230 ret = btrfs_check_trunc_cache_free_space(fs_info,
2231 &fs_info->global_block_rsv);
2232 if (ret)
2233 goto out_put;
2234
2235 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2236 if (ret)
2237 goto out_put;
2238 }
2239
2240 spin_lock(&block_group->lock);
2241 if (block_group->cached != BTRFS_CACHE_FINISHED ||
2242 !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2243 /*
2244 * don't bother trying to write stuff out _if_
2245 * a) we're not cached,
 2246	 * b) we're mounted with the nospace_cache option,
 2247	 * c) we're using the v2 space_cache (FREE_SPACE_TREE).
2248 */
2249 dcs = BTRFS_DC_WRITTEN;
2250 spin_unlock(&block_group->lock);
2251 goto out_put;
2252 }
2253 spin_unlock(&block_group->lock);
2254
2255 /*
2256 * We hit an ENOSPC when setting up the cache in this transaction, just
2257 * skip doing the setup, we've already cleared the cache so we're safe.
2258 */
2259 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2260 ret = -ENOSPC;
2261 goto out_put;
2262 }
2263
2264 /*
2265 * Try to preallocate enough space based on how big the block group is.
2266 * Keep in mind this has to include any pinned space which could end up
2267 * taking up quite a bit since it's not folded into the other space
2268 * cache.
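	 *
	 * Illustrative numbers, assuming 4K pages: a 1G block group gives
	 * div_u64(1G, 256M) = 4, then 4 * 16 pages = 64 pages, i.e. 256K of
	 * preallocated cache space.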
2269 */
David Sterbab3470b52019-10-23 18:48:22 +02002270 num_pages = div_u64(block_group->length, SZ_256M);
Josef Bacik77745c02019-06-20 15:38:00 -04002271 if (!num_pages)
2272 num_pages = 1;
2273
2274 num_pages *= 16;
2275 num_pages *= PAGE_SIZE;
2276
2277 ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
2278 if (ret)
2279 goto out_put;
2280
2281 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2282 num_pages, num_pages,
2283 &alloc_hint);
2284 /*
2285 * Our cache requires contiguous chunks so that we don't modify a bunch
2286 * of metadata or split extents when writing the cache out, which means
 2287	 * we can hit ENOSPC if we are heavily fragmented, in addition to normal
2288 * out of space conditions. So if we hit this just skip setting up any
2289 * other block groups for this transaction, maybe we'll unpin enough
2290 * space the next time around.
2291 */
2292 if (!ret)
2293 dcs = BTRFS_DC_SETUP;
2294 else if (ret == -ENOSPC)
2295 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2296
2297out_put:
2298 iput(inode);
2299out_free:
2300 btrfs_release_path(path);
2301out:
2302 spin_lock(&block_group->lock);
2303 if (!ret && dcs == BTRFS_DC_SETUP)
2304 block_group->cache_generation = trans->transid;
2305 block_group->disk_cache_state = dcs;
2306 spin_unlock(&block_group->lock);
2307
2308 extent_changeset_free(data_reserved);
2309 return ret;
2310}
2311
2312int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2313{
2314 struct btrfs_fs_info *fs_info = trans->fs_info;
2315 struct btrfs_block_group_cache *cache, *tmp;
2316 struct btrfs_transaction *cur_trans = trans->transaction;
2317 struct btrfs_path *path;
2318
2319 if (list_empty(&cur_trans->dirty_bgs) ||
2320 !btrfs_test_opt(fs_info, SPACE_CACHE))
2321 return 0;
2322
2323 path = btrfs_alloc_path();
2324 if (!path)
2325 return -ENOMEM;
2326
2327 /* Could add new block groups, use _safe just in case */
2328 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2329 dirty_list) {
2330 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2331 cache_save_setup(cache, trans, path);
2332 }
2333
2334 btrfs_free_path(path);
2335 return 0;
2336}
2337
2338/*
2339 * Transaction commit does final block group cache writeback during a critical
2340 * section where nothing is allowed to change the FS. This is required in
2341 * order for the cache to actually match the block group, but can introduce a
2342 * lot of latency into the commit.
2343 *
2344 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2345 * There's a chance we'll have to redo some of it if the block group changes
2346 * again during the commit, but it greatly reduces the commit latency by
2347 * getting rid of the easy block groups while we're still allowing others to
2348 * join the commit.
2349 */
2350int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
2351{
2352 struct btrfs_fs_info *fs_info = trans->fs_info;
2353 struct btrfs_block_group_cache *cache;
2354 struct btrfs_transaction *cur_trans = trans->transaction;
2355 int ret = 0;
2356 int should_put;
2357 struct btrfs_path *path = NULL;
2358 LIST_HEAD(dirty);
2359 struct list_head *io = &cur_trans->io_bgs;
2360 int num_started = 0;
2361 int loops = 0;
2362
2363 spin_lock(&cur_trans->dirty_bgs_lock);
2364 if (list_empty(&cur_trans->dirty_bgs)) {
2365 spin_unlock(&cur_trans->dirty_bgs_lock);
2366 return 0;
2367 }
2368 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2369 spin_unlock(&cur_trans->dirty_bgs_lock);
2370
2371again:
2372 /* Make sure all the block groups on our dirty list actually exist */
2373 btrfs_create_pending_block_groups(trans);
2374
2375 if (!path) {
2376 path = btrfs_alloc_path();
2377 if (!path)
2378 return -ENOMEM;
2379 }
2380
2381 /*
2382 * cache_write_mutex is here only to save us from balance or automatic
2383 * removal of empty block groups deleting this block group while we are
2384 * writing out the cache
2385 */
2386 mutex_lock(&trans->transaction->cache_write_mutex);
2387 while (!list_empty(&dirty)) {
2388 bool drop_reserve = true;
2389
2390 cache = list_first_entry(&dirty,
2391 struct btrfs_block_group_cache,
2392 dirty_list);
2393 /*
2394 * This can happen if something re-dirties a block group that
2395 * is already under IO. Just wait for it to finish and then do
2396 * it all again
2397 */
2398 if (!list_empty(&cache->io_list)) {
2399 list_del_init(&cache->io_list);
2400 btrfs_wait_cache_io(trans, cache, path);
2401 btrfs_put_block_group(cache);
2402 }
 2404
2405 /*
2406 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2407 * it should update the cache_state. Don't delete until after
2408 * we wait.
2409 *
2410 * Since we're not running in the commit critical section
2411 * we need the dirty_bgs_lock to protect from update_block_group
2412 */
2413 spin_lock(&cur_trans->dirty_bgs_lock);
2414 list_del_init(&cache->dirty_list);
2415 spin_unlock(&cur_trans->dirty_bgs_lock);
2416
2417 should_put = 1;
2418
2419 cache_save_setup(cache, trans, path);
2420
2421 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2422 cache->io_ctl.inode = NULL;
2423 ret = btrfs_write_out_cache(trans, cache, path);
2424 if (ret == 0 && cache->io_ctl.inode) {
2425 num_started++;
2426 should_put = 0;
2427
2428 /*
2429 * The cache_write_mutex is protecting the
2430 * io_list, also refer to the definition of
2431 * btrfs_transaction::io_bgs for more details
2432 */
2433 list_add_tail(&cache->io_list, io);
2434 } else {
2435 /*
2436 * If we failed to write the cache, the
2437 * generation will be bad and life goes on
2438 */
2439 ret = 0;
2440 }
2441 }
2442 if (!ret) {
2443 ret = write_one_cache_group(trans, path, cache);
2444 /*
2445 * Our block group might still be attached to the list
2446 * of new block groups in the transaction handle of some
2447 * other task (struct btrfs_trans_handle->new_bgs). This
2448 * means its block group item isn't yet in the extent
2449 * tree. If this happens ignore the error, as we will
2450 * try again later in the critical section of the
2451 * transaction commit.
2452 */
2453 if (ret == -ENOENT) {
2454 ret = 0;
2455 spin_lock(&cur_trans->dirty_bgs_lock);
2456 if (list_empty(&cache->dirty_list)) {
2457 list_add_tail(&cache->dirty_list,
2458 &cur_trans->dirty_bgs);
2459 btrfs_get_block_group(cache);
2460 drop_reserve = false;
2461 }
2462 spin_unlock(&cur_trans->dirty_bgs_lock);
2463 } else if (ret) {
2464 btrfs_abort_transaction(trans, ret);
2465 }
2466 }
2467
2468 /* If it's not on the io list, we need to put the block group */
2469 if (should_put)
2470 btrfs_put_block_group(cache);
2471 if (drop_reserve)
2472 btrfs_delayed_refs_rsv_release(fs_info, 1);
2473
2474 if (ret)
2475 break;
2476
2477 /*
2478 * Avoid blocking other tasks for too long. It might even save
2479 * us from writing caches for block groups that are going to be
2480 * removed.
2481 */
2482 mutex_unlock(&trans->transaction->cache_write_mutex);
2483 mutex_lock(&trans->transaction->cache_write_mutex);
2484 }
2485 mutex_unlock(&trans->transaction->cache_write_mutex);
2486
2487 /*
2488 * Go through delayed refs for all the stuff we've just kicked off
2489 * and then loop back (just once)
2490 */
2491 ret = btrfs_run_delayed_refs(trans, 0);
2492 if (!ret && loops == 0) {
2493 loops++;
2494 spin_lock(&cur_trans->dirty_bgs_lock);
2495 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2496 /*
2497 * dirty_bgs_lock protects us from concurrent block group
2498 * deletes too (not just cache_write_mutex).
2499 */
2500 if (!list_empty(&dirty)) {
2501 spin_unlock(&cur_trans->dirty_bgs_lock);
2502 goto again;
2503 }
2504 spin_unlock(&cur_trans->dirty_bgs_lock);
2505 } else if (ret < 0) {
2506 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
2507 }
2508
2509 btrfs_free_path(path);
2510 return ret;
2511}
2512
2513int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
2514{
2515 struct btrfs_fs_info *fs_info = trans->fs_info;
2516 struct btrfs_block_group_cache *cache;
2517 struct btrfs_transaction *cur_trans = trans->transaction;
2518 int ret = 0;
2519 int should_put;
2520 struct btrfs_path *path;
2521 struct list_head *io = &cur_trans->io_bgs;
2522 int num_started = 0;
2523
2524 path = btrfs_alloc_path();
2525 if (!path)
2526 return -ENOMEM;
2527
2528 /*
2529 * Even though we are in the critical section of the transaction commit,
2530 * we can still have concurrent tasks adding elements to this
2531 * transaction's list of dirty block groups. These tasks correspond to
2532 * endio free space workers started when writeback finishes for a
2533 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2534 * allocate new block groups as a result of COWing nodes of the root
2535 * tree when updating the free space inode. The writeback for the space
2536 * caches is triggered by an earlier call to
2537 * btrfs_start_dirty_block_groups() and iterations of the following
2538 * loop.
2539 * Also we want to do the cache_save_setup first and then run the
2540 * delayed refs to make sure we have the best chance at doing this all
2541 * in one shot.
2542 */
2543 spin_lock(&cur_trans->dirty_bgs_lock);
2544 while (!list_empty(&cur_trans->dirty_bgs)) {
2545 cache = list_first_entry(&cur_trans->dirty_bgs,
2546 struct btrfs_block_group_cache,
2547 dirty_list);
2548
2549 /*
2550 * This can happen if cache_save_setup re-dirties a block group
2551 * that is already under IO. Just wait for it to finish and
2552 * then do it all again
2553 */
2554 if (!list_empty(&cache->io_list)) {
2555 spin_unlock(&cur_trans->dirty_bgs_lock);
2556 list_del_init(&cache->io_list);
2557 btrfs_wait_cache_io(trans, cache, path);
2558 btrfs_put_block_group(cache);
2559 spin_lock(&cur_trans->dirty_bgs_lock);
2560 }
2561
2562 /*
2563 * Don't remove from the dirty list until after we've waited on
2564 * any pending IO
2565 */
2566 list_del_init(&cache->dirty_list);
2567 spin_unlock(&cur_trans->dirty_bgs_lock);
2568 should_put = 1;
2569
2570 cache_save_setup(cache, trans, path);
2571
2572 if (!ret)
2573 ret = btrfs_run_delayed_refs(trans,
2574 (unsigned long) -1);
2575
2576 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2577 cache->io_ctl.inode = NULL;
2578 ret = btrfs_write_out_cache(trans, cache, path);
2579 if (ret == 0 && cache->io_ctl.inode) {
2580 num_started++;
2581 should_put = 0;
2582 list_add_tail(&cache->io_list, io);
2583 } else {
2584 /*
2585 * If we failed to write the cache, the
2586 * generation will be bad and life goes on
2587 */
2588 ret = 0;
2589 }
2590 }
2591 if (!ret) {
2592 ret = write_one_cache_group(trans, path, cache);
2593 /*
2594 * One of the free space endio workers might have
2595 * created a new block group while updating a free space
2596 * cache's inode (at inode.c:btrfs_finish_ordered_io())
2597 * and hasn't released its transaction handle yet, in
2598 * which case the new block group is still attached to
2599 * its transaction handle and its creation has not
2600 * finished yet (no block group item in the extent tree
2601 * yet, etc). If this is the case, wait for all free
2602 * space endio workers to finish and retry. This is a
 2603	 * very rare case, so no need for a more efficient and
2604 * complex approach.
2605 */
2606 if (ret == -ENOENT) {
2607 wait_event(cur_trans->writer_wait,
2608 atomic_read(&cur_trans->num_writers) == 1);
2609 ret = write_one_cache_group(trans, path, cache);
2610 }
2611 if (ret)
2612 btrfs_abort_transaction(trans, ret);
2613 }
2614
 2615	/* If it's not on the io list, we need to put the block group */
2616 if (should_put)
2617 btrfs_put_block_group(cache);
2618 btrfs_delayed_refs_rsv_release(fs_info, 1);
2619 spin_lock(&cur_trans->dirty_bgs_lock);
2620 }
2621 spin_unlock(&cur_trans->dirty_bgs_lock);
2622
2623 /*
2624 * Refer to the definition of io_bgs member for details why it's safe
2625 * to use it without any locking
2626 */
2627 while (!list_empty(io)) {
2628 cache = list_first_entry(io, struct btrfs_block_group_cache,
2629 io_list);
2630 list_del_init(&cache->io_list);
2631 btrfs_wait_cache_io(trans, cache, path);
2632 btrfs_put_block_group(cache);
2633 }
2634
2635 btrfs_free_path(path);
2636 return ret;
2637}
Josef Bacik606d1bf2019-06-20 15:38:02 -04002638
2639int btrfs_update_block_group(struct btrfs_trans_handle *trans,
2640 u64 bytenr, u64 num_bytes, int alloc)
2641{
2642 struct btrfs_fs_info *info = trans->fs_info;
2643 struct btrfs_block_group_cache *cache = NULL;
2644 u64 total = num_bytes;
2645 u64 old_val;
2646 u64 byte_in_group;
2647 int factor;
2648 int ret = 0;
2649
2650 /* Block accounting for super block */
2651 spin_lock(&info->delalloc_root_lock);
2652 old_val = btrfs_super_bytes_used(info->super_copy);
2653 if (alloc)
2654 old_val += num_bytes;
2655 else
2656 old_val -= num_bytes;
2657 btrfs_set_super_bytes_used(info->super_copy, old_val);
2658 spin_unlock(&info->delalloc_root_lock);
2659
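	/*
	 * Walk the range one block group at a time; num_bytes is clamped to
	 * the current group below, so a range spanning several groups is
	 * handled piecewise.
	 */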
2660 while (total) {
2661 cache = btrfs_lookup_block_group(info, bytenr);
2662 if (!cache) {
2663 ret = -ENOENT;
2664 break;
2665 }
2666 factor = btrfs_bg_type_to_factor(cache->flags);
2667
2668 /*
2669 * If this block group has free space cache written out, we
2670 * need to make sure to load it if we are removing space. This
2671 * is because we need the unpinning stage to actually add the
2672 * space back to the block group, otherwise we will leak space.
2673 */
Josef Bacika60adce2019-09-24 16:50:44 -04002674 if (!alloc && !btrfs_block_group_cache_done(cache))
Josef Bacik606d1bf2019-06-20 15:38:02 -04002675 btrfs_cache_block_group(cache, 1);
2676
David Sterbab3470b52019-10-23 18:48:22 +02002677 byte_in_group = bytenr - cache->start;
2678 WARN_ON(byte_in_group > cache->length);
Josef Bacik606d1bf2019-06-20 15:38:02 -04002679
2680 spin_lock(&cache->space_info->lock);
2681 spin_lock(&cache->lock);
2682
2683 if (btrfs_test_opt(info, SPACE_CACHE) &&
2684 cache->disk_cache_state < BTRFS_DC_CLEAR)
2685 cache->disk_cache_state = BTRFS_DC_CLEAR;
2686
David Sterbabf38be62019-10-23 18:48:11 +02002687 old_val = cache->used;
David Sterbab3470b52019-10-23 18:48:22 +02002688 num_bytes = min(total, cache->length - byte_in_group);
Josef Bacik606d1bf2019-06-20 15:38:02 -04002689 if (alloc) {
2690 old_val += num_bytes;
David Sterbabf38be62019-10-23 18:48:11 +02002691 cache->used = old_val;
Josef Bacik606d1bf2019-06-20 15:38:02 -04002692 cache->reserved -= num_bytes;
2693 cache->space_info->bytes_reserved -= num_bytes;
2694 cache->space_info->bytes_used += num_bytes;
2695 cache->space_info->disk_used += num_bytes * factor;
2696 spin_unlock(&cache->lock);
2697 spin_unlock(&cache->space_info->lock);
2698 } else {
2699 old_val -= num_bytes;
David Sterbabf38be62019-10-23 18:48:11 +02002700 cache->used = old_val;
Josef Bacik606d1bf2019-06-20 15:38:02 -04002701 cache->pinned += num_bytes;
2702 btrfs_space_info_update_bytes_pinned(info,
2703 cache->space_info, num_bytes);
2704 cache->space_info->bytes_used -= num_bytes;
2705 cache->space_info->disk_used -= num_bytes * factor;
2706 spin_unlock(&cache->lock);
2707 spin_unlock(&cache->space_info->lock);
2708
Josef Bacik606d1bf2019-06-20 15:38:02 -04002709 percpu_counter_add_batch(
2710 &cache->space_info->total_bytes_pinned,
2711 num_bytes,
2712 BTRFS_TOTAL_BYTES_PINNED_BATCH);
2713 set_extent_dirty(info->pinned_extents,
2714 bytenr, bytenr + num_bytes - 1,
2715 GFP_NOFS | __GFP_NOFAIL);
2716 }
2717
2718 spin_lock(&trans->transaction->dirty_bgs_lock);
2719 if (list_empty(&cache->dirty_list)) {
2720 list_add_tail(&cache->dirty_list,
2721 &trans->transaction->dirty_bgs);
2722 trans->delayed_ref_updates++;
2723 btrfs_get_block_group(cache);
2724 }
2725 spin_unlock(&trans->transaction->dirty_bgs_lock);
2726
2727 /*
2728 * No longer have used bytes in this block group, queue it for
2729 * deletion. We do this after adding the block group to the
2730 * dirty list to avoid races between cleaner kthread and space
2731 * cache writeout.
2732 */
2733 if (!alloc && old_val == 0)
2734 btrfs_mark_bg_unused(cache);
2735
2736 btrfs_put_block_group(cache);
2737 total -= num_bytes;
2738 bytenr += num_bytes;
2739 }
2740
2741 /* Modified block groups are accounted for in the delayed_refs_rsv. */
2742 btrfs_update_delayed_refs_rsv(trans);
2743 return ret;
2744}
2745
2746/**
2747 * btrfs_add_reserved_bytes - update the block_group and space info counters
2748 * @cache: The cache we are manipulating
 2749 * @ram_bytes: The number of bytes of file content; this is the same as
 2750 * @num_bytes except on the compression path.
2751 * @num_bytes: The number of bytes in question
2752 * @delalloc: The blocks are allocated for the delalloc write
2753 *
2754 * This is called by the allocator when it reserves space. If this is a
2755 * reservation and the block group has become read only we cannot make the
2756 * reservation and return -EAGAIN, otherwise this function always succeeds.
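 *
 * For example (illustrative only): a buffered write of 128K that compresses
 * down to a 32K extent would pass ram_bytes = 128K and num_bytes = 32K,
 * converting 128K of bytes_may_use into a 32K on-disk reservation.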
2757 */
2758int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
2759 u64 ram_bytes, u64 num_bytes, int delalloc)
2760{
2761 struct btrfs_space_info *space_info = cache->space_info;
2762 int ret = 0;
2763
2764 spin_lock(&space_info->lock);
2765 spin_lock(&cache->lock);
2766 if (cache->ro) {
2767 ret = -EAGAIN;
2768 } else {
2769 cache->reserved += num_bytes;
2770 space_info->bytes_reserved += num_bytes;
Josef Bacika43c3832019-08-22 15:10:56 -04002771 trace_btrfs_space_reservation(cache->fs_info, "space_info",
2772 space_info->flags, num_bytes, 1);
Josef Bacik606d1bf2019-06-20 15:38:02 -04002773 btrfs_space_info_update_bytes_may_use(cache->fs_info,
2774 space_info, -ram_bytes);
2775 if (delalloc)
2776 cache->delalloc_bytes += num_bytes;
2777 }
2778 spin_unlock(&cache->lock);
2779 spin_unlock(&space_info->lock);
2780 return ret;
2781}
2782
2783/**
2784 * btrfs_free_reserved_bytes - update the block_group and space info counters
2785 * @cache: The cache we are manipulating
2786 * @num_bytes: The number of bytes in question
2787 * @delalloc: The blocks are allocated for the delalloc write
2788 *
2789 * This is called by somebody who is freeing space that was never actually used
2790 * on disk. For example if you reserve some space for a new leaf in transaction
 2791 * A and before transaction A commits you free that leaf, you call this to
 2792 * clear the reservation.
2793 */
2794void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
2795 u64 num_bytes, int delalloc)
2796{
2797 struct btrfs_space_info *space_info = cache->space_info;
2798
2799 spin_lock(&space_info->lock);
2800 spin_lock(&cache->lock);
2801 if (cache->ro)
2802 space_info->bytes_readonly += num_bytes;
2803 cache->reserved -= num_bytes;
2804 space_info->bytes_reserved -= num_bytes;
2805 space_info->max_extent_size = 0;
2806
2807 if (delalloc)
2808 cache->delalloc_bytes -= num_bytes;
2809 spin_unlock(&cache->lock);
2810 spin_unlock(&space_info->lock);
2811}
Josef Bacik07730d82019-06-20 15:38:04 -04002812
2813static void force_metadata_allocation(struct btrfs_fs_info *info)
2814{
2815 struct list_head *head = &info->space_info;
2816 struct btrfs_space_info *found;
2817
2818 rcu_read_lock();
2819 list_for_each_entry_rcu(found, head, list) {
2820 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2821 found->force_alloc = CHUNK_ALLOC_FORCE;
2822 }
2823 rcu_read_unlock();
2824}
2825
2826static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
2827 struct btrfs_space_info *sinfo, int force)
2828{
2829 u64 bytes_used = btrfs_space_info_used(sinfo, false);
2830 u64 thresh;
2831
2832 if (force == CHUNK_ALLOC_FORCE)
2833 return 1;
2834
2835 /*
2836 * in limited mode, we want to have some free space up to
2837 * about 1% of the FS size.
2838 */
2839 if (force == CHUNK_ALLOC_LIMITED) {
2840 thresh = btrfs_super_total_bytes(fs_info->super_copy);
2841 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
2842
2843 if (sinfo->total_bytes - bytes_used < thresh)
2844 return 1;
2845 }
2846
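	/*
	 * Otherwise only allocate once used space (plus a 2M slack) has
	 * crossed roughly 80% of this space_info, since div_factor(x, 8)
	 * evaluates to x * 8 / 10.
	 */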
2847 if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
2848 return 0;
2849 return 1;
2850}
2851
2852int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
2853{
2854 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
2855
2856 return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2857}
2858
2859/*
2860 * If force is CHUNK_ALLOC_FORCE:
2861 * - return 1 if it successfully allocates a chunk,
2862 * - return errors including -ENOSPC otherwise.
2863 * If force is NOT CHUNK_ALLOC_FORCE:
2864 * - return 0 if it doesn't need to allocate a new chunk,
2865 * - return 1 if it successfully allocates a chunk,
2866 * - return errors including -ENOSPC otherwise.
2867 */
2868int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
2869 enum btrfs_chunk_alloc_enum force)
2870{
2871 struct btrfs_fs_info *fs_info = trans->fs_info;
2872 struct btrfs_space_info *space_info;
2873 bool wait_for_alloc = false;
2874 bool should_alloc = false;
2875 int ret = 0;
2876
2877 /* Don't re-enter if we're already allocating a chunk */
2878 if (trans->allocating_chunk)
2879 return -ENOSPC;
2880
2881 space_info = btrfs_find_space_info(fs_info, flags);
2882 ASSERT(space_info);
2883
2884 do {
2885 spin_lock(&space_info->lock);
2886 if (force < space_info->force_alloc)
2887 force = space_info->force_alloc;
2888 should_alloc = should_alloc_chunk(fs_info, space_info, force);
2889 if (space_info->full) {
2890 /* No more free physical space */
2891 if (should_alloc)
2892 ret = -ENOSPC;
2893 else
2894 ret = 0;
2895 spin_unlock(&space_info->lock);
2896 return ret;
2897 } else if (!should_alloc) {
2898 spin_unlock(&space_info->lock);
2899 return 0;
2900 } else if (space_info->chunk_alloc) {
2901 /*
2902 * Someone is already allocating, so we need to block
2903 * until this someone is finished and then loop to
2904 * recheck if we should continue with our allocation
2905 * attempt.
2906 */
2907 wait_for_alloc = true;
2908 spin_unlock(&space_info->lock);
2909 mutex_lock(&fs_info->chunk_mutex);
2910 mutex_unlock(&fs_info->chunk_mutex);
2911 } else {
2912 /* Proceed with allocation */
2913 space_info->chunk_alloc = 1;
2914 wait_for_alloc = false;
2915 spin_unlock(&space_info->lock);
2916 }
2917
2918 cond_resched();
2919 } while (wait_for_alloc);
2920
2921 mutex_lock(&fs_info->chunk_mutex);
2922 trans->allocating_chunk = true;
2923
2924 /*
2925 * If we have mixed data/metadata chunks we want to make sure we keep
2926 * allocating mixed chunks instead of individual chunks.
2927 */
2928 if (btrfs_mixed_space_info(space_info))
2929 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
2930
2931 /*
2932 * if we're doing a data chunk, go ahead and make sure that
2933 * we keep a reasonable number of metadata chunks allocated in the
2934 * FS as well.
2935 */
2936 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
2937 fs_info->data_chunk_allocations++;
2938 if (!(fs_info->data_chunk_allocations %
2939 fs_info->metadata_ratio))
2940 force_metadata_allocation(fs_info);
2941 }
2942
2943 /*
2944 * Check if we have enough space in SYSTEM chunk because we may need
2945 * to update devices.
2946 */
2947 check_system_chunk(trans, flags);
2948
2949 ret = btrfs_alloc_chunk(trans, flags);
2950 trans->allocating_chunk = false;
2951
2952 spin_lock(&space_info->lock);
2953 if (ret < 0) {
2954 if (ret == -ENOSPC)
2955 space_info->full = 1;
2956 else
2957 goto out;
2958 } else {
2959 ret = 1;
2960 space_info->max_extent_size = 0;
2961 }
2962
2963 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
2964out:
2965 space_info->chunk_alloc = 0;
2966 spin_unlock(&space_info->lock);
2967 mutex_unlock(&fs_info->chunk_mutex);
2968 /*
2969 * When we allocate a new chunk we reserve space in the chunk block
2970 * reserve to make sure we can COW nodes/leafs in the chunk tree or
2971 * add new nodes/leafs to it if we end up needing to do it when
2972 * inserting the chunk item and updating device items as part of the
2973 * second phase of chunk allocation, performed by
2974 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
2975 * large number of new block groups to create in our transaction
2976 * handle's new_bgs list to avoid exhausting the chunk block reserve
2977 * in extreme cases - like having a single transaction create many new
2978 * block groups when starting to write out the free space caches of all
2979 * the block groups that were made dirty during the lifetime of the
2980 * transaction.
2981 */
2982 if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
2983 btrfs_create_pending_block_groups(trans);
2984
2985 return ret;
2986}
2987
2988static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
2989{
2990 u64 num_dev;
2991
2992 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
2993 if (!num_dev)
2994 num_dev = fs_info->fs_devices->rw_devices;
2995
2996 return num_dev;
2997}
2998
2999/*
Marcos Paulo de Souzaa9143bd2019-10-07 21:50:38 -03003000 * Reserve space in the system space for allocating or removing a chunk
Josef Bacik07730d82019-06-20 15:38:04 -04003001 */
3002void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
3003{
3004 struct btrfs_fs_info *fs_info = trans->fs_info;
3005 struct btrfs_space_info *info;
3006 u64 left;
3007 u64 thresh;
3008 int ret = 0;
3009 u64 num_devs;
3010
3011 /*
3012 * Needed because we can end up allocating a system chunk and for an
3013 * atomic and race free space reservation in the chunk block reserve.
3014 */
3015 lockdep_assert_held(&fs_info->chunk_mutex);
3016
3017 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3018 spin_lock(&info->lock);
3019 left = info->total_bytes - btrfs_space_info_used(info, true);
3020 spin_unlock(&info->lock);
3021
3022 num_devs = get_profile_num_devs(fs_info, type);
3023
3024 /* num_devs device items to update and 1 chunk item to add or remove */
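	/*
	 * Ballpark, assuming btrfs_calc_metadata_size() scales as nodesize *
	 * BTRFS_MAX_LEVEL per item (and twice that for inserts): with a 16K
	 * nodesize and 2 rw devices this is about 512K.
	 */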
Josef Bacik2bd36e72019-08-22 15:14:33 -04003025 thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
3026 btrfs_calc_insert_metadata_size(fs_info, 1);
Josef Bacik07730d82019-06-20 15:38:04 -04003027
3028 if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3029 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3030 left, thresh, type);
3031 btrfs_dump_space_info(fs_info, info, 0, 0);
3032 }
3033
3034 if (left < thresh) {
3035 u64 flags = btrfs_system_alloc_profile(fs_info);
3036
3037 /*
3038 * Ignore failure to create system chunk. We might end up not
3039 * needing it, as we might not need to COW all nodes/leafs from
3040 * the paths we visit in the chunk tree (they were already COWed
3041 * or created in the current transaction for example).
3042 */
3043 ret = btrfs_alloc_chunk(trans, flags);
3044 }
3045
3046 if (!ret) {
3047 ret = btrfs_block_rsv_add(fs_info->chunk_root,
3048 &fs_info->chunk_block_rsv,
3049 thresh, BTRFS_RESERVE_NO_FLUSH);
3050 if (!ret)
3051 trans->chunk_bytes_reserved += thresh;
3052 }
3053}
3054
Josef Bacik3e43c272019-06-20 15:38:06 -04003055void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
3056{
3057 struct btrfs_block_group_cache *block_group;
3058 u64 last = 0;
3059
3060 while (1) {
3061 struct inode *inode;
3062
3063 block_group = btrfs_lookup_first_block_group(info, last);
3064 while (block_group) {
3065 btrfs_wait_block_group_cache_done(block_group);
3066 spin_lock(&block_group->lock);
3067 if (block_group->iref)
3068 break;
3069 spin_unlock(&block_group->lock);
3070 block_group = btrfs_next_block_group(block_group);
3071 }
3072 if (!block_group) {
3073 if (last == 0)
3074 break;
3075 last = 0;
3076 continue;
3077 }
3078
3079 inode = block_group->inode;
3080 block_group->iref = 0;
3081 block_group->inode = NULL;
3082 spin_unlock(&block_group->lock);
3083 ASSERT(block_group->io_ctl.inode == NULL);
3084 iput(inode);
David Sterbab3470b52019-10-23 18:48:22 +02003085 last = block_group->start + block_group->length;
Josef Bacik3e43c272019-06-20 15:38:06 -04003086 btrfs_put_block_group(block_group);
3087 }
3088}
3089
3090/*
3091 * Must be called only after stopping all workers, since we could have block
3092 * group caching kthreads running, and therefore they could race with us if we
3093 * freed the block groups before stopping them.
3094 */
3095int btrfs_free_block_groups(struct btrfs_fs_info *info)
3096{
3097 struct btrfs_block_group_cache *block_group;
3098 struct btrfs_space_info *space_info;
3099 struct btrfs_caching_control *caching_ctl;
3100 struct rb_node *n;
3101
3102 down_write(&info->commit_root_sem);
3103 while (!list_empty(&info->caching_block_groups)) {
3104 caching_ctl = list_entry(info->caching_block_groups.next,
3105 struct btrfs_caching_control, list);
3106 list_del(&caching_ctl->list);
3107 btrfs_put_caching_control(caching_ctl);
3108 }
3109 up_write(&info->commit_root_sem);
3110
3111 spin_lock(&info->unused_bgs_lock);
3112 while (!list_empty(&info->unused_bgs)) {
3113 block_group = list_first_entry(&info->unused_bgs,
3114 struct btrfs_block_group_cache,
3115 bg_list);
3116 list_del_init(&block_group->bg_list);
3117 btrfs_put_block_group(block_group);
3118 }
3119 spin_unlock(&info->unused_bgs_lock);
3120
3121 spin_lock(&info->block_group_cache_lock);
3122 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
3123 block_group = rb_entry(n, struct btrfs_block_group_cache,
3124 cache_node);
3125 rb_erase(&block_group->cache_node,
3126 &info->block_group_cache_tree);
3127 RB_CLEAR_NODE(&block_group->cache_node);
3128 spin_unlock(&info->block_group_cache_lock);
3129
3130 down_write(&block_group->space_info->groups_sem);
3131 list_del(&block_group->list);
3132 up_write(&block_group->space_info->groups_sem);
3133
3134 /*
3135 * We haven't cached this block group, which means we could
3136 * possibly have excluded extents on this block group.
3137 */
3138 if (block_group->cached == BTRFS_CACHE_NO ||
3139 block_group->cached == BTRFS_CACHE_ERROR)
3140 btrfs_free_excluded_extents(block_group);
3141
3142 btrfs_remove_free_space_cache(block_group);
3143 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3144 ASSERT(list_empty(&block_group->dirty_list));
3145 ASSERT(list_empty(&block_group->io_list));
3146 ASSERT(list_empty(&block_group->bg_list));
3147 ASSERT(atomic_read(&block_group->count) == 1);
3148 btrfs_put_block_group(block_group);
3149
3150 spin_lock(&info->block_group_cache_lock);
3151 }
3152 spin_unlock(&info->block_group_cache_lock);
3153
3154 /*
3155 * Now that all the block groups are freed, go through and free all the
3156 * space_info structs. This is only called during the final stages of
3157 * unmount, and so we know nobody is using them. We call
3158 * synchronize_rcu() once before we start, just to be on the safe side.
3159 */
3160 synchronize_rcu();
3161
3162 btrfs_release_global_block_rsv(info);
3163
3164 while (!list_empty(&info->space_info)) {
3165 space_info = list_entry(info->space_info.next,
3166 struct btrfs_space_info,
3167 list);
3168
3169 /*
3170 * Do not hide this behind enospc_debug, this is actually
3171 * important and indicates a real bug if this happens.
3172 */
3173 if (WARN_ON(space_info->bytes_pinned > 0 ||
3174 space_info->bytes_reserved > 0 ||
3175 space_info->bytes_may_use > 0))
3176 btrfs_dump_space_info(info, space_info, 0, 0);
3177 list_del(&space_info->list);
3178 btrfs_sysfs_remove_space_info(space_info);
3179 }
3180 return 0;
3181}