/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY            (1U << 0)
#define EXTENT_UPTODATE         (1U << 1)
#define EXTENT_LOCKED           (1U << 2)
#define EXTENT_NEW              (1U << 3)
#define EXTENT_DELALLOC         (1U << 4)
#define EXTENT_DEFRAG           (1U << 5)
#define EXTENT_BOUNDARY         (1U << 6)
#define EXTENT_NODATASUM        (1U << 7)
#define EXTENT_CLEAR_META_RESV  (1U << 8)
#define EXTENT_NEED_WAIT        (1U << 9)
#define EXTENT_DAMAGED          (1U << 10)
#define EXTENT_NORESERVE        (1U << 11)
#define EXTENT_QGROUP_RESERVED  (1U << 12)
#define EXTENT_CLEAR_DATA_RESV  (1U << 13)
/*
 * Must be cleared only during ordered extent completion or on error paths if
 * we did not manage to submit bios and create the ordered extents for the
 * range. Should not be cleared during page release and page invalidation (if
 * there is an ordered extent in flight), that is left for the ordered extent
 * completion.
 */
#define EXTENT_DELALLOC_NEW     (1U << 14)
/*
 * When an ordered extent successfully completes for a region marked as a new
 * delalloc range, use this flag when clearing the range to indicate that the
 * VFS inode's byte count should be incremented and the inode's new delalloc
 * bytes decremented, in an atomic way to prevent races with stat(2).
 */
#define EXTENT_ADD_INODE_BYTES  (1U << 15)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
                                 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS          (EXTENT_DO_ACCOUNTING | \
                                 EXTENT_ADD_INODE_BYTES)

/*
 * Redefined bits above, for use only in the device allocation tree. These
 * must not reuse EXTENT_LOCKED, EXTENT_BOUNDARY, EXTENT_CLEAR_META_RESV or
 * EXTENT_CLEAR_DATA_RESV, because those bits have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED         EXTENT_DIRTY
#define CHUNK_TRIMMED           EXTENT_DEFRAG
#define CHUNK_STATE_MASK        (CHUNK_ALLOCATED | \
                                 CHUNK_TRIMMED)
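
/*
 * Example (illustrative sketch, not part of this header's API): marking a
 * device range as allocated in a device allocation state tree. The
 * struct btrfs_device with an 'alloc_state' extent_io_tree is an assumption
 * here, suggested by IO_TREE_DEVICE_ALLOC_STATE below:
 *
 *      set_extent_bits(&device->alloc_state, start,
 *                      start + num_bytes - 1, CHUNK_ALLOCATED);
 */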

enum {
        IO_TREE_FS_PINNED_EXTENTS,
        IO_TREE_FS_EXCLUDED_EXTENTS,
        IO_TREE_BTREE_INODE_IO,
        IO_TREE_INODE_IO,
        IO_TREE_INODE_IO_FAILURE,
        IO_TREE_RELOC_BLOCKS,
        IO_TREE_TRANS_DIRTY_PAGES,
        IO_TREE_ROOT_DIRTY_LOG_PAGES,
        IO_TREE_INODE_FILE_EXTENT,
        IO_TREE_LOG_CSUM_RANGE,
        IO_TREE_SELFTEST,
        IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
        struct rb_root state;
        struct btrfs_fs_info *fs_info;
        void *private_data;
        u64 dirty_bytes;
        bool track_uptodate;

        /* Who owns this io tree, should be one of IO_TREE_* */
        u8 owner;

        spinlock_t lock;
};

struct extent_state {
        u64 start;
        u64 end; /* inclusive */
        struct rb_node rb_node;

        /* ADD NEW ELEMENTS AFTER THIS */
        wait_queue_head_t wq;
        refcount_t refs;
        u32 state;

        struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
        struct list_head leak_list;
#endif
};

int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
                         struct extent_io_tree *tree, unsigned int owner,
                         void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
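
/*
 * Example (hedged sketch): initializing the IO tree of an inode. The
 * btrfs_inode layout assumed below comes from elsewhere in btrfs, not from
 * this header:
 *
 *      extent_io_tree_init(fs_info, &inode->io_tree, IO_TREE_INODE_IO,
 *                          &inode->vfs_inode);
 */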

int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
        return lock_extent_bits(tree, start, end, NULL);
}
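
/*
 * Typical usage (illustrative sketch): lock a range, operate on it, then
 * release it with unlock_extent(), declared further below. Both offsets are
 * inclusive:
 *
 *      lock_extent(&inode->io_tree, start, end);
 *      ... operate on [start, end] ...
 *      unlock_extent(&inode->io_tree, start, end);
 */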

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);

int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end,
                     u64 max_bytes, u32 bits, int contig);

void free_extent_state(struct extent_state *state);
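
/*
 * Note (hedged): helpers that return a cached state through an
 * extent_state ** argument take a reference on it. The caller drops that
 * reference either by handing the state back to a *_cached() variant or by
 * calling free_extent_state() directly, e.g.:
 *
 *      struct extent_state *cached = NULL;
 *
 *      lock_extent_bits(tree, start, end, &cached);
 *      ...
 *      unlock_extent_cached(tree, start, end, &cached);
 */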
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                             u32 bits, struct extent_changeset *changeset);
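
/*
 * Example (hedged sketch): the changeset records which ranges actually had
 * bits cleared, which qgroup accounting uses for reserved data space. The
 * extent_changeset_alloc()/extent_changeset_free() helpers assumed here live
 * elsewhere in btrfs:
 *
 *      struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *      clear_record_extent_bits(&inode->io_tree, start, end,
 *                               EXTENT_QGROUP_RESERVED, changeset);
 *      ...
 *      extent_changeset_free(changeset);
 */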
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     u32 bits, int wake, int delete,
                     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       u32 bits, int wake, int delete,
                       struct extent_state **cached, gfp_t mask,
                       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
                                       u64 end, struct extent_state **cached)
{
        return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
                                  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
                u64 start, u64 end, struct extent_state **cached)
{
        return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
                                  GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
                                    u64 end, u32 bits)
{
        int wake = 0;

        if (bits & EXTENT_LOCKED)
                wake = 1;

        return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                           u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   u32 bits, unsigned exclusive_bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask,
                   struct extent_changeset *changeset);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
                           u32 bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
                                  u64 end, u32 bits)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
                              NULL);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached_state)
{
        return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
                                  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
                                   u64 end, gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, NULL,
                              mask, NULL);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
                                     u64 end, struct extent_state **cached)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC |
                                EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       u32 bits, u32 clear_bits,
                       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
                                      u64 end, u32 extra_bits,
                                      struct extent_state **cached_state)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
                              0, NULL, cached_state, GFP_NOFS, NULL);
}
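
/*
 * Example (hedged sketch): marking a written range as delalloc during a
 * buffered write, keeping a cached state so the unlock avoids a tree search:
 *
 *      struct extent_state *cached = NULL;
 *
 *      lock_extent_bits(&inode->io_tree, start, end, &cached);
 *      set_extent_delalloc(&inode->io_tree, start, end, 0, &cached);
 *      unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */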

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached_state)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
                              0, NULL, cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
                                 u64 end)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, NULL,
                              GFP_NOFS, NULL);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached_state, gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
                              cached_state, mask, NULL);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, u32 bits,
                          struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
                                 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
                               u64 *start_ret, u64 *end_ret, u32 bits);
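
/*
 * Example (illustrative sketch): walking every range with a given bit set.
 * find_first_extent_bit() returns 0 when a matching range was found:
 *
 *      u64 found_start, found_end;
 *
 *      while (!find_first_extent_bit(tree, start, &found_start, &found_end,
 *                                    EXTENT_DIRTY, NULL)) {
 *              ... process [found_start, found_end] ...
 *              start = found_end + 1;
 *      }
 */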
int extent_invalidatepage(struct extent_io_tree *tree,
                          struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
                               u64 *end, u64 max_bytes,
                               struct extent_state **cached_state);

/* This should be reworked in the future and put elsewhere. */
struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
                      struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
                                  u64 end);
int free_io_failure(struct extent_io_tree *failure_tree,
                    struct extent_io_tree *io_tree,
                    struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
                     struct extent_io_tree *failure_tree,
                     struct extent_io_tree *io_tree, u64 start,
                     struct page *page, u64 ino, unsigned int pg_offset);

#endif /* BTRFS_EXTENT_IO_TREE_H */