// SPDX-License-Identifier: GPL-2.0

#include "ctree.h"
#include "block-group.h"

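/*
 * Block group caches are reference counted: every lookup helper below
 * returns its result with a reference held, and the caller is expected to
 * drop that reference with btrfs_put_block_group() when done. The final
 * put frees the cache.
 */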
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * If the tree is not empty, someone is still holding the
		 * mutex of a full stripe lock, which can only be released
		 * by that holder. Freeing the block group now would cause
		 * a use-after-free when the holder tries to release the
		 * full stripe lock.
		 *
		 * There is no better way to resolve this than to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * Return the block group that starts at or after bytenr if contains is 0,
 * otherwise return the block group that contains the given bytenr.
 */
static struct btrfs_block_group_cache *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
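
/*
 * Worked example (hypothetical layout, for illustration only): with block
 * groups covering [0, 8M) and [8M, 16M), a search for bytenr 4M with
 * contains set returns the [0, 8M) group, while with contains clear it
 * returns the [8M, 16M) group, i.e. the first group starting at or after
 * 4M.
 */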

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

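/*
 * Example usage of the lookup helpers (sketch only; "fs_info", "bytenr"
 * and "process" stand in for whatever the caller has at hand):
 *
 *	struct btrfs_block_group_cache *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (bg) {
 *		process(bg);
 *		btrfs_put_block_group(bg);
 *	}
 */

/*
 * Return the block group after @cache in logical address order. The
 * reference on @cache is dropped, and a reference is taken on the group
 * that is returned; NULL is returned once the last block group has been
 * visited.
 */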
struct btrfs_block_group_cache *btrfs_next_block_group(
		struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->key.objectid + cache->key.offset;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else {
		cache = NULL;
	}
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
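
/*
 * Iteration sketch (hypothetical caller, "process" is a placeholder): the
 * put/get handoff inside btrfs_next_block_group() keeps the reference
 * counts balanced across the whole walk:
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg))
 *		process(bg);
 */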