/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group_cache *block_group;
	u64 progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group_cache {
	struct btrfs_key key;
	struct btrfs_block_group_item item;
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 pinned;
	u64 reserved;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;

	/*
	 * If the free space extent count exceeds this number, convert the
	 * block group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for delayed data space allocation, because only the data
	 * space allocation and the related metadata updates may span
	 * transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For RAID5/6, this is the full stripe length, not including parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	/* Usage count */
	atomic_t count;

	/*
	 * List of struct btrfs_free_cluster for this block group. Today it
	 * will only ever have one entry, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	atomic_t trimming;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that
	 * step. This is to prevent races between block group relocation and
	 * nocow writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
};

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
		struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group_cache *btrfs_next_block_group(
		struct btrfs_block_group_cache *cache);
void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
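
/*
 * Usage sketch (not part of the original header): btrfs_lookup_block_group()
 * is expected to return the group with its usage count (->count) elevated, so
 * every successful lookup should be paired with btrfs_put_block_group().  The
 * helper name below is hypothetical and only illustrates that pattern.
 */
static inline u64 example_block_group_flags(struct btrfs_fs_info *fs_info,
					    u64 bytenr)
{
	struct btrfs_block_group_cache *cache;
	u64 flags;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!cache)
		return 0;
	flags = cache->flags;		/* use the group while holding a ref */
	btrfs_put_block_group(cache);	/* drop the reference from the lookup */
	return flags;
}
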
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
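
/*
 * Usage sketch (not part of the original header): the nocow write protocol
 * described for ->nocow_writers above.  A writer takes a "nocow reference"
 * before doing the write, and drops it once the ordered extent has been
 * created, or earlier on error.  The helper name is hypothetical.
 */
static inline bool example_try_nocow_write(struct btrfs_fs_info *fs_info,
					   u64 bytenr)
{
	/* Fails (returns false) if the block group is read-only, e.g. being relocated. */
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return false;
	/*
	 * ... do the nocow checks and create the ordered extent here ...
	 * Whoever finishes that step (or hits an error before reaching it)
	 * must drop the reference so relocation can make progress:
	 */
	btrfs_dec_nocow_writers(fs_info, bytenr);
	return true;
}
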
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache);
int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
			    int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group_cache *cache);

static inline int btrfs_block_group_cache_done(
		struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}
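
/*
 * Usage sketch (not part of the original header): how a caller might combine
 * the caching helpers above.  It starts (or reuses) the async caching work and
 * then waits until at least @num_bytes of free space has been discovered.  The
 * function name is hypothetical.
 */
static inline int example_ensure_free_space_cached(
		struct btrfs_block_group_cache *cache, u64 num_bytes)
{
	int ret;

	if (btrfs_block_group_cache_done(cache))
		return 0;
	/* load_cache_only == 0: allow a full scan, not just the on-disk cache. */
	ret = btrfs_cache_block_group(cache, 0);
	if (ret)
		return ret;
	/* Returns once enough free space was found or caching has finished. */
	btrfs_wait_block_group_cache_progress(cache, num_bytes);
	return 0;
}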

#endif /* BTRFS_BLOCK_GROUP_H */