/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block group for async discard.  Discard
 * has a two-pass design where extent discarding is prioritized over bitmap
 * discarding.  BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};
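
/*
 * Illustrative progression for one pass over a block group (a sketch of the
 * two-pass design described above, not a full state machine):
 *
 *	BTRFS_DISCARD_EXTENTS		trim free space extents first
 *	  -> BTRFS_DISCARD_BITMAPS	then trim ranges tracked in bitmaps
 *	  -> BTRFS_DISCARD_RESET_CURSOR	rewind discard_cursor before the
 *					block group moves between lists
 */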

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
};
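
/*
 * Illustrative use of a force level with btrfs_chunk_alloc(), declared later
 * in this header (a hedged sketch; `trans` and `fs_info` are assumed to be a
 * valid transaction handle and fs_info):
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_data_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *
 * With CHUNK_ALLOC_NO_FORCE this may legitimately allocate nothing; a
 * negative return value is an error.
 */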

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	u64 progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for the delayed data space allocation, because only the
	 * data space allocation and the related metadata update can be done
	 * across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;
	unsigned int to_copy:1;
	unsigned int relocating_repair:1;
	unsigned int chunk_item_inserted:1;
	unsigned int zone_is_active:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one entry, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0.  This is to prevent them from
	 * being reused while some task is still using the block group after
	 * it was deleted - we want to make sure they can only be reused for
	 * new block groups after that task is done with the deleted block
	 * group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0).  Decremented by such tasks once they create an ordered
	 * extent or earlier if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Flag indicating this block group is placed on a sequential zone */
	bool seq_zone;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation.  This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}
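
/*
 * For example, a logical address @bytenr lies inside @block_group iff:
 *
 *	block_group->start <= bytenr &&
 *	bytenr < btrfs_block_group_end(block_group)
 */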

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
					struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
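
/*
 * Typical lookup/put pairing (an illustrative sketch): the lookup helpers
 * return the block group with a reference held, which the caller must drop:
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (bg) {
 *		... use bg->start, bg->length, etc. ...
 *		btrfs_put_block_group(bg);
 *	}
 */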
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
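
/*
 * Sketch of the reservation lifetime documented on the `reservations` field
 * above (illustrative, not a prescription): the allocator increments the
 * counter, and once the matching ordered extent exists the writer calls
 * btrfs_dec_block_group_reservations() with the extent's disk start; code
 * that needs the whole group quiesced (e.g. relocation) can then use
 * btrfs_wait_block_group_reservations() to wait for it to reach zero.
 */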
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
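
/*
 * Illustrative nocow accounting pattern (a sketch, assuming `fs_info` and a
 * file extent's disk `bytenr`):
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... safe to set up the nocow write ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	} else {
 *		... block group is (or is becoming) read-only,
 *		    fall back to a COW write ...
 *	}
 */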
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
			    int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 bytes_used, u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     struct block_device *bdev, u64 physical, u64 **logical,
		     int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}
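
/*
 * Typical wait pattern (an illustrative sketch): start caching if it has not
 * been started yet, then block until the group's free space is fully known:
 *
 *	ret = btrfs_cache_block_group(cache, 0);
 *	if (!ret && !btrfs_block_group_done(cache))
 *		ret = btrfs_wait_block_group_cache_done(cache);
 */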

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
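
/*
 * Illustrative pairing for the `frozen` counter documented in the struct
 * above (a sketch): a task that must keep the block group's logical range
 * and device extents from being reused, even across deletion:
 *
 *	btrfs_freeze_block_group(bg);
 *	... work against the block group's range ...
 *	btrfs_unfreeze_block_group(bg);
 */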

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

#endif /* BTRFS_BLOCK_GROUP_H */