/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
        BTRFS_DC_WRITTEN,
        BTRFS_DC_ERROR,
        BTRFS_DC_CLEAR,
        BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard.  This is due
 * to the two-pass nature of it where extent discarding is prioritized over
 * bitmap discarding.  BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
        BTRFS_DISCARD_EXTENTS,
        BTRFS_DISCARD_BITMAPS,
        BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum btrfs_chunk_alloc_enum {
        CHUNK_ALLOC_NO_FORCE,
        CHUNK_ALLOC_LIMITED,
        CHUNK_ALLOC_FORCE,
};

struct btrfs_caching_control {
        struct list_head list;
        struct mutex mutex;
        wait_queue_head_t wait;
        struct btrfs_work work;
        struct btrfs_block_group *block_group;
        u64 progress;
        refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group {
        struct btrfs_fs_info *fs_info;
        struct inode *inode;
        spinlock_t lock;
        u64 start;
        u64 length;
        u64 pinned;
        u64 reserved;
        u64 used;
        u64 delalloc_bytes;
        u64 bytes_super;
        u64 flags;
        u64 cache_generation;

        /*
         * If the free space extent count exceeds this number, convert the block
         * group to bitmaps.
         */
        u32 bitmap_high_thresh;

        /*
         * If the free space extent count drops below this number, convert the
         * block group back to extents.
         */
        u32 bitmap_low_thresh;

        /*
         * It is only used for the delayed data space allocation because only
         * the data space allocation and the related metadata update can be
         * done across transactions.
         */
        struct rw_semaphore data_rwsem;

        /* For raid56, this is a full stripe, without parity */
        unsigned long full_stripe_len;

        unsigned int ro;
        unsigned int iref:1;
        unsigned int has_caching_ctl:1;
        unsigned int removed:1;
        unsigned int to_copy:1;
        unsigned int relocating_repair:1;
        unsigned int chunk_item_inserted:1;
        unsigned int zone_is_active:1;

        int disk_cache_state;

        /* Cache tracking stuff */
        int cached;
        struct btrfs_caching_control *caching_ctl;
        u64 last_byte_to_unpin;

        struct btrfs_space_info *space_info;

        /* Free space cache stuff */
        struct btrfs_free_space_ctl *free_space_ctl;

        /* Block group cache stuff */
        struct rb_node cache_node;

        /* For block groups in the same raid type */
        struct list_head list;

        refcount_t refs;

        /*
         * List of struct btrfs_free_cluster for this block group.
         * Today it will only have one thing on it, but that may change.
         */
        struct list_head cluster_list;

        /* For delayed block group creation or deletion of empty block groups */
        struct list_head bg_list;

        /* For read-only block groups */
        struct list_head ro_list;

        /*
         * When non-zero it means the block group's logical address and its
         * device extents cannot be reused for future block group allocations
         * until the counter goes down to 0. This is to prevent them from being
         * reused while some task is still using the block group after it was
         * deleted - we want to make sure they can only be reused for new block
         * groups after that task is done with the deleted block group.
         */
        atomic_t frozen;

        /* For discard operations */
        struct list_head discard_list;
        int discard_index;
        u64 discard_eligible_time;
        u64 discard_cursor;
        enum btrfs_discard_state discard_state;

        /* For dirty block groups */
        struct list_head dirty_list;
        struct list_head io_list;

        struct btrfs_io_ctl io_ctl;

        /*
         * Incremented when doing extent allocations and holding a read lock
         * on the space_info's groups_sem semaphore.
         * Decremented when an ordered extent that represents an IO against this
         * block group's range is created (after it's added to its inode's
         * root's list of ordered extents) or immediately after the allocation
         * if it's a metadata extent or fallocate extent (for these cases we
         * don't create ordered extents).
         */
        atomic_t reservations;

        /*
         * Incremented while holding the spinlock *lock* by a task checking if
         * it can perform a nocow write (incremented if the value for the *ro*
         * field is 0). Decremented by such tasks once they create an ordered
         * extent or before that if some error happens before reaching that step.
         * This is to prevent races between block group relocation and nocow
         * writes through direct IO.
         */
        atomic_t nocow_writers;

        /* Lock for free space tree operations. */
        struct mutex free_space_lock;

        /*
         * Does the block group need to be added to the free space tree?
         * Protected by free_space_lock.
         */
        int needs_free_space;

        /* Flag indicating this block group is placed on a sequential zone */
        bool seq_zone;

        /*
         * Number of extents in this block group used for swap files.
         * All accesses protected by the spinlock 'lock'.
         */
        int swap_extents;

        /* Record locked full stripes for RAID5/6 block group */
        struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

        /*
         * Allocation offset for the block group to implement sequential
         * allocation. This is used only on a zoned filesystem.
         */
        u64 alloc_offset;
        u64 zone_unusable;
        u64 zone_capacity;
        u64 meta_write_pointer;
        struct map_lookup *physical_map;
        struct list_head active_bg_list;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
        return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_data_only(
                                        struct btrfs_block_group *block_group)
{
        /*
         * In mixed mode the fragmentation is expected to be high, lowering the
         * efficiency, so only proper data block groups are considered.
         */
        return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
               !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
                                        struct btrfs_block_group *block_group)
{
        struct btrfs_fs_info *fs_info = block_group->fs_info;

        return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
                block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
               (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
                block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
                struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
                struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
                struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
                                        const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
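
/*
 * Illustrative sketch only (hypothetical helper, not used by the filesystem
 * code): the pattern a nocow writer follows with the nocow_writers counter
 * documented in struct btrfs_block_group above.
 */
static inline bool example_try_nocow_write(struct btrfs_fs_info *fs_info,
                                           u64 bytenr)
{
        /* Fails when the block group is read-only (e.g. being relocated). */
        if (!btrfs_inc_nocow_writers(fs_info, bytenr))
                return false;

        /*
         * ... set up the nocow write here; once the ordered extent exists
         * (or on error), drop the reference so relocation can proceed.
         */
        btrfs_dec_nocow_writers(fs_info, bytenr);
        return true;
}
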
| 255 | void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, |
Josef Bacik | 676f1f7 | 2019-06-20 15:37:48 -0400 | [diff] [blame] | 256 | u64 num_bytes); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 257 | int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache); |
| 258 | int btrfs_cache_block_group(struct btrfs_block_group *cache, |
Josef Bacik | 676f1f7 | 2019-06-20 15:37:48 -0400 | [diff] [blame] | 259 | int load_cache_only); |
Josef Bacik | e3cb339 | 2019-06-20 15:37:50 -0400 | [diff] [blame] | 260 | void btrfs_put_caching_control(struct btrfs_caching_control *ctl); |
| 261 | struct btrfs_caching_control *btrfs_get_caching_control( |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 262 | struct btrfs_block_group *cache); |
| 263 | u64 add_new_free_space(struct btrfs_block_group *block_group, |
Josef Bacik | 9f21246 | 2019-08-06 16:43:19 +0200 | [diff] [blame] | 264 | u64 start, u64 end); |
Josef Bacik | e3e0520 | 2019-06-20 15:37:55 -0400 | [diff] [blame] | 265 | struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( |
| 266 | struct btrfs_fs_info *fs_info, |
| 267 | const u64 chunk_offset); |
| 268 | int btrfs_remove_block_group(struct btrfs_trans_handle *trans, |
| 269 | u64 group_start, struct extent_map *em); |
| 270 | void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 271 | void btrfs_mark_bg_unused(struct btrfs_block_group *bg); |
Johannes Thumshirn | 18bb8bb | 2021-04-19 16:41:02 +0900 | [diff] [blame] | 272 | void btrfs_reclaim_bgs_work(struct work_struct *work); |
| 273 | void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info); |
| 274 | void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg); |
Josef Bacik | 4358d963 | 2019-06-20 15:37:57 -0400 | [diff] [blame] | 275 | int btrfs_read_block_groups(struct btrfs_fs_info *info); |
Filipe Manana | 79bd371 | 2021-06-29 14:43:06 +0100 | [diff] [blame] | 276 | struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, |
| 277 | u64 bytes_used, u64 type, |
| 278 | u64 chunk_offset, u64 size); |
Josef Bacik | 4358d963 | 2019-06-20 15:37:57 -0400 | [diff] [blame] | 279 | void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); |
Qu Wenruo | b12de52 | 2019-11-15 10:09:00 +0800 | [diff] [blame] | 280 | int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, |
| 281 | bool do_chunk_alloc); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 282 | void btrfs_dec_block_group_ro(struct btrfs_block_group *cache); |
Josef Bacik | 77745c0 | 2019-06-20 15:38:00 -0400 | [diff] [blame] | 283 | int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); |
| 284 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); |
| 285 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); |
Josef Bacik | ade4b51 | 2019-06-20 15:38:01 -0400 | [diff] [blame] | 286 | int btrfs_update_block_group(struct btrfs_trans_handle *trans, |
Anand Jain | 11b66fa | 2021-10-13 14:05:14 +0800 | [diff] [blame] | 287 | u64 bytenr, u64 num_bytes, bool alloc); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 288 | int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, |
Josef Bacik | ade4b51 | 2019-06-20 15:38:01 -0400 | [diff] [blame] | 289 | u64 ram_bytes, u64 num_bytes, int delalloc); |
David Sterba | 32da5386 | 2019-10-29 19:20:18 +0100 | [diff] [blame] | 290 | void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, |
Josef Bacik | ade4b51 | 2019-06-20 15:38:01 -0400 | [diff] [blame] | 291 | u64 num_bytes, int delalloc); |
Josef Bacik | 07730d8 | 2019-06-20 15:38:04 -0400 | [diff] [blame] | 292 | int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, |
| 293 | enum btrfs_chunk_alloc_enum force); |
| 294 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type); |
| 295 | void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type); |
Filipe Manana | 2bb2e00 | 2021-10-13 10:12:49 +0100 | [diff] [blame] | 296 | void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans, |
| 297 | bool is_item_insertion); |
Josef Bacik | 878d7b6 | 2019-06-20 15:38:05 -0400 | [diff] [blame] | 298 | u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags); |
Josef Bacik | 3e43c27 | 2019-06-20 15:38:06 -0400 | [diff] [blame] | 299 | void btrfs_put_block_group_cache(struct btrfs_fs_info *info); |
| 300 | int btrfs_free_block_groups(struct btrfs_fs_info *info); |
Josef Bacik | e747853 | 2020-10-23 09:58:10 -0400 | [diff] [blame] | 301 | void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache, |
| 302 | struct btrfs_caching_control *caching_ctl); |
Naohiro Aota | 138082f | 2021-02-04 19:22:02 +0900 | [diff] [blame] | 303 | int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, |
| 304 | struct block_device *bdev, u64 physical, u64 **logical, |
| 305 | int *naddrs, int *stripe_len); |

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
        return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
        return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
        return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
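
/*
 * Illustrative sketch only (hypothetical helper, not part of the btrfs API):
 * how the btrfs_chunk_alloc_enum force levels documented near the top of this
 * header are passed to btrfs_chunk_alloc().  Error handling is trimmed.
 */
static inline int example_maybe_alloc_data_chunk(struct btrfs_trans_handle *trans,
                                                 struct btrfs_fs_info *fs_info)
{
        u64 flags = btrfs_data_alloc_profile(fs_info);

        /*
         * CHUNK_ALLOC_NO_FORCE: only allocate if the allocator really needs a
         * new chunk.  A caller that must get a new chunk would pass
         * CHUNK_ALLOC_FORCE instead (this is what btrfs_force_chunk_alloc()
         * is for).
         */
        return btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE);
}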

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
               cache->cached == BTRFS_CACHE_ERROR;
}
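
/*
 * Illustrative sketch only (hypothetical helper): making sure a block group's
 * free space has been cached before searching it, using the caching interfaces
 * declared above.  Callers that only need some free space may instead wait for
 * partial progress with btrfs_wait_block_group_cache_progress().
 */
static inline int example_ensure_block_group_cached(struct btrfs_block_group *cache)
{
        int ret;

        if (btrfs_block_group_done(cache))
                return 0;

        /* Kick off (or reuse) the caching work, then wait for it to finish. */
        ret = btrfs_cache_block_group(cache, 0);
        if (ret < 0)
                return ret;

        return btrfs_wait_block_group_cache_done(cache);
}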

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
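
/*
 * Illustrative sketch only (hypothetical caller): the freeze/unfreeze pair
 * pins the "frozen" counter documented in struct btrfs_block_group, so that a
 * removed block group's logical address and device extents are not reused
 * while this task still needs to look at them.
 */
static inline void example_use_removed_block_group(struct btrfs_block_group *cache)
{
        btrfs_freeze_block_group(cache);

        /* ... safely inspect the block group's range / extent mapping ... */

        /* The last unfreeze of a removed block group releases it for reuse. */
        btrfs_unfreeze_block_group(cache);
}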

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
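
/*
 * Illustrative sketch only (hypothetical helper): accounting one swap file
 * extent against the swap_extents counter documented in struct
 * btrfs_block_group.  The increment can fail, e.g. when the block group has
 * gone read-only for relocation.
 */
static inline bool example_account_swap_extent(struct btrfs_block_group *bg)
{
        if (!btrfs_inc_block_group_swap_extents(bg))
                return false;

        /* ... record the extent for the active swap file ... */

        /* On swap file deactivation (or on error) the count is dropped. */
        btrfs_dec_block_group_swap_extents(bg, 1);
        return true;
}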

#endif /* BTRFS_BLOCK_GROUP_H */