/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard.  Discard is
 * done in two passes, with extent discarding prioritized over bitmap
 * discarding.  BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention on discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	u64 progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for delayed data space allocation, because only the data
	 * space allocation and the related metadata update can be done across
	 * transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity. */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_cluster entries for this block group.
	 * Today it will only have one entry on it, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0.  This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0).  Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}
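
/*
 * Illustrative sketch, not a mainline helper: a containment check using the
 * [start, start + length) convention that btrfs_block_group_end() encodes.
 * The name btrfs_example_bg_contains() is made up for this example.
 */
static inline bool btrfs_example_bg_contains(struct btrfs_block_group *block_group,
					     u64 bytenr)
{
	return bytenr >= block_group->start &&
	       bytenr < btrfs_block_group_end(block_group);
}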

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
					struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
					struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
					struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
					struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
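
/*
 * Illustrative sketch only, not part of the mainline API: the usual
 * lookup/use/release pattern.  btrfs_lookup_block_group() returns the block
 * group containing @bytenr with a reference held (or NULL), and the caller
 * must drop that reference with btrfs_put_block_group().  The helper name is
 * made up for this example.
 */
static inline u64 btrfs_example_block_group_length(struct btrfs_fs_info *fs_info,
						   u64 bytenr)
{
	struct btrfs_block_group *bg;
	u64 length = 0;

	bg = btrfs_lookup_block_group(fs_info, bytenr);	/* takes a reference */
	if (bg) {
		length = bg->length;
		btrfs_put_block_group(bg);		/* drop the reference */
	}
	return length;
}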
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
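
/*
 * Illustrative sketch, not mainline code: how the nocow_writers counter is
 * meant to be paired around a nocow write, per the comment on ->nocow_writers
 * in struct btrfs_block_group.  btrfs_inc_nocow_writers() returns false if
 * the block group is read-only (or cannot be found), in which case the write
 * must fall back to COW.  The helper name is made up for this example.
 */
static inline bool btrfs_example_try_nocow(struct btrfs_fs_info *fs_info,
					   u64 bytenr)
{
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return false;	/* block group is RO, fall back to COW */

	/* ... do the nocow write and create its ordered extent ... */

	btrfs_dec_nocow_writers(fs_info, bytenr);
	return true;
}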
| 238 | void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, |
Josef Bacik | 676f1f7 | 2019-06-20 15:37:48 -0400 | [diff] [blame] | 239 | u64 num_bytes); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 240 | int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache); |
| 241 | int btrfs_cache_block_group(struct btrfs_block_group *cache, |
Josef Bacik | 676f1f7 | 2019-06-20 15:37:48 -0400 | [diff] [blame] | 242 | int load_cache_only); |
Josef Bacik | e3cb339 | 2019-06-20 15:37:50 -0400 | [diff] [blame] | 243 | void btrfs_put_caching_control(struct btrfs_caching_control *ctl); |
| 244 | struct btrfs_caching_control *btrfs_get_caching_control( |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 245 | struct btrfs_block_group *cache); |
| 246 | u64 add_new_free_space(struct btrfs_block_group *block_group, |
Josef Bacik | 9f21246 | 2019-08-06 16:43:19 +0200 | [diff] [blame] | 247 | u64 start, u64 end); |
Josef Bacik | e3e0520 | 2019-06-20 15:37:55 -0400 | [diff] [blame] | 248 | struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( |
| 249 | struct btrfs_fs_info *fs_info, |
| 250 | const u64 chunk_offset); |
| 251 | int btrfs_remove_block_group(struct btrfs_trans_handle *trans, |
| 252 | u64 group_start, struct extent_map *em); |
| 253 | void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 254 | void btrfs_mark_bg_unused(struct btrfs_block_group *bg); |
Josef Bacik | 4358d963 | 2019-06-20 15:37:57 -0400 | [diff] [blame] | 255 | int btrfs_read_block_groups(struct btrfs_fs_info *info); |
| 256 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, |
| 257 | u64 type, u64 chunk_offset, u64 size); |
| 258 | void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); |
Qu Wenruo | b12de52 | 2019-11-15 10:09:00 +0800 | [diff] [blame] | 259 | int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, |
| 260 | bool do_chunk_alloc); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 261 | void btrfs_dec_block_group_ro(struct btrfs_block_group *cache); |
Josef Bacik | 77745c0 | 2019-06-20 15:38:00 -0400 | [diff] [blame] | 262 | int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); |
| 263 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); |
| 264 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); |
Josef Bacik | ade4b51 | 2019-06-20 15:38:01 -0400 | [diff] [blame] | 265 | int btrfs_update_block_group(struct btrfs_trans_handle *trans, |
| 266 | u64 bytenr, u64 num_bytes, int alloc); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 267 | int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, |
Josef Bacik | ade4b51 | 2019-06-20 15:38:01 -0400 | [diff] [blame] | 268 | u64 ram_bytes, u64 num_bytes, int delalloc); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 269 | void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, |
Josef Bacik | ade4b51 | 2019-06-20 15:38:01 -0400 | [diff] [blame] | 270 | u64 num_bytes, int delalloc); |
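
/*
 * Illustrative sketch, not mainline code: pairing btrfs_add_reserved_bytes()
 * with btrfs_free_reserved_bytes() around an allocation attempt, roughly the
 * way the extent allocator does.  The helper name is made up; @delalloc says
 * whether this reservation backs delalloc data (it is then also accounted in
 * ->delalloc_bytes).
 */
static inline int btrfs_example_reserve(struct btrfs_block_group *cache,
					u64 ram_bytes, u64 num_bytes,
					int delalloc)
{
	int ret;

	ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes, delalloc);
	if (ret)
		return ret;	/* typically -ENOSPC, e.g. the group went RO */

	/*
	 * ... record the allocation; if that fails, the reservation must be
	 * undone with btrfs_free_reserved_bytes(cache, num_bytes, delalloc).
	 */
	return 0;
}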
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
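
/*
 * Illustrative sketch, not part of the mainline API: asking for a new data
 * chunk from inside a transaction.  CHUNK_ALLOC_NO_FORCE only allocates a
 * chunk if we really need one; CHUNK_ALLOC_FORCE always tries.  The wrapper
 * name is made up, and fs_info is passed explicitly so the example does not
 * depend on the transaction handle's layout.
 */
static inline int btrfs_example_alloc_data_chunk(struct btrfs_trans_handle *trans,
						 struct btrfs_fs_info *fs_info)
{
	return btrfs_chunk_alloc(trans, btrfs_data_alloc_profile(fs_info),
				 CHUNK_ALLOC_NO_FORCE);
}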

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}
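
/*
 * Illustrative sketch, not mainline code: making sure a block group's free
 * space has been cached before using it, by combining btrfs_cache_block_group()
 * with btrfs_wait_block_group_cache_done().  Callers that only need some free
 * space could wait with btrfs_wait_block_group_cache_progress() instead.  The
 * helper name is made up for this example.
 */
static inline int btrfs_example_ensure_cached(struct btrfs_block_group *cache)
{
	int ret = 0;

	if (!btrfs_block_group_done(cache)) {
		ret = btrfs_cache_block_group(cache, 0);
		if (!ret)
			ret = btrfs_wait_block_group_cache_done(cache);
	}
	return ret;
}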

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
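
/*
 * Illustrative sketch, not mainline code: the freeze/unfreeze pairing that
 * drives the ->frozen counter documented in struct btrfs_block_group.  While
 * frozen, the block group's logical address and device extents are not reused
 * even if the block group gets deleted; btrfs_unfreeze_block_group() drops the
 * count and allows reuse once it reaches zero.  The helper name is made up.
 */
static inline void btrfs_example_use_deleted_bg(struct btrfs_block_group *cache)
{
	btrfs_freeze_block_group(cache);	/* pin the logical/device range */

	/* ... safely operate on the (possibly deleted) block group ... */

	btrfs_unfreeze_block_group(cache);	/* allow reuse again */
}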

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
#endif

#endif /* BTRFS_BLOCK_GROUP_H */