blob: 0399cf8e3c32c5c3a5e5141bd07cea93e2abcaa6 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
David Sterba9888c342018-04-03 19:16:55 +02002
3#ifndef BTRFS_EXTENT_IO_H
4#define BTRFS_EXTENT_IO_H
Chris Masond1310b22008-01-24 16:13:08 -05005
6#include <linux/rbtree.h>
Elena Reshetovab7ac31b2017-03-03 10:55:19 +02007#include <linux/refcount.h>
Christoph Hellwig10c5db22020-05-23 09:30:11 +02008#include <linux/fiemap.h>
Qu Wenruodeb67892020-12-02 14:48:01 +08009#include <linux/btrfs_tree.h>
Qu Wenruoac467772015-10-12 12:08:16 +080010#include "ulist.h"
Chris Masond1310b22008-01-24 16:13:08 -050011
Li Zefan261507a02010-12-17 14:21:50 +080012/*
13 * flags for bio submission. The high bits indicate the compression
14 * type for this bio
15 */
Chris Masonc8b97812008-10-29 14:49:59 -040016#define EXTENT_BIO_COMPRESSED 1
Li Zefan261507a02010-12-17 14:21:50 +080017#define EXTENT_BIO_FLAG_SHIFT 16
Chris Masonc8b97812008-10-29 14:49:59 -040018
/*
 * Extent buffer state bits, used as bit numbers in extent_buffer::bflags
 * (see e.g. extent_buffer_uptodate() which test_bit()s them).  The values
 * are positional, so the order must not change.
 */
enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
	EXTENT_BUFFER_NO_CHECK,
	EXTENT_BUFFER_ZONE_FINISH,
};
Chris Masonb4ce94d2009-02-04 09:25:08 -050037
Liu Boda2c7002017-02-10 16:41:05 +010038/* these are flags for __process_pages_contig */
Josef Bacikc2790a22013-07-29 11:20:47 -040039#define PAGE_UNLOCK (1 << 0)
Qu Wenruo6869b0a2021-01-26 16:33:45 +080040/* Page starts writeback, clear dirty bit and set writeback bit */
41#define PAGE_START_WRITEBACK (1 << 1)
42#define PAGE_END_WRITEBACK (1 << 2)
Qu Wenruof57ad932021-04-07 19:22:13 +080043#define PAGE_SET_ORDERED (1 << 3)
Qu Wenruo6869b0a2021-01-26 16:33:45 +080044#define PAGE_SET_ERROR (1 << 4)
45#define PAGE_LOCK (1 << 5)
Chris Masona791e352009-10-08 11:27:10 -040046
Chris Masond1310b22008-01-24 16:13:08 -050047/*
48 * page->private values. Every page that is controlled by the extent
49 * map has page->private set to one.
50 */
51#define EXTENT_PAGE_PRIVATE 1
Chris Masond1310b22008-01-24 16:13:08 -050052
Omar Sandoval2fe1d552016-09-22 17:24:20 -070053/*
54 * The extent buffer bitmap operations are done with byte granularity instead of
55 * word granularity for two reasons:
56 * 1. The bitmaps must be little-endian on disk.
57 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
58 * single word in a bitmap may straddle two pages in the extent buffer.
59 */
60#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
61#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
62#define BITMAP_FIRST_BYTE_MASK(start) \
63 ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
64#define BITMAP_LAST_BYTE_MASK(nbits) \
65 (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
66
Josef Bacikea466792012-03-26 21:57:36 -040067struct btrfs_root;
Nikolay Borisov6fc0ef62017-02-20 13:51:03 +020068struct btrfs_inode;
Miao Xiefacc8a222013-07-25 19:22:34 +080069struct btrfs_io_bio;
Wan Jiabing183ebab2021-04-01 16:03:39 +080070struct btrfs_fs_info;
David Sterba47dc1962016-02-11 13:24:13 +010071struct io_failure_record;
Josef Bacik9c7d3a52019-09-23 10:05:19 -040072struct extent_io_tree;
David Sterbaa7587812017-06-23 03:05:23 +020073
Omar Sandoval77d5d682020-04-16 14:46:25 -070074typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio,
75 int mirror_num,
76 unsigned long bio_flags);
77
Qu Wenruo8896a082020-10-21 14:24:53 +080078typedef blk_status_t (extent_submit_bio_start_t)(struct inode *inode,
Qu Wenruo1941b642020-12-02 14:47:57 +080079 struct bio *bio, u64 dio_file_offset);
David Sterbaa7587812017-06-23 03:05:23 +020080
Qu Wenruodeb67892020-12-02 14:48:01 +080081#define INLINE_EXTENT_BUFFER_PAGES (BTRFS_MAX_METADATA_BLOCKSIZE / PAGE_SIZE)
/*
 * In-memory descriptor for one metadata extent, backed by up to
 * INLINE_EXTENT_BUFFER_PAGES pages.
 */
struct extent_buffer {
	u64 start;		/* start offset of the buffer */
	unsigned long len;	/* length in bytes; see num_extent_pages() */
	unsigned long bflags;	/* EXTENT_BUFFER_* state bits */
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;	/* protects manipulation of refs */
	atomic_t refs;
	atomic_t io_pages;	/* NOTE(review): presumably pages with IO in flight — confirm in extent_io.c */
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	s8 log_index;

	struct rw_semaphore lock;

	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
	struct list_head release_list;
#ifdef CONFIG_BTRFS_DEBUG
	/* Entry on the per-fs leak-tracking list, debug builds only */
	struct list_head leak_list;
#endif
};
104
/*
 * Structure to record info about the bio being assembled, and other info like
 * how many bytes are there before stripe/ordered extent boundary.
 */
struct btrfs_bio_ctrl {
	struct bio *bio;		/* bio under assembly, NULL if none */
	unsigned long bio_flags;	/* EXTENT_BIO_* flags for @bio */
	u32 len_to_stripe_boundary;	/* bytes left before the stripe boundary */
	u32 len_to_oe_boundary;		/* bytes left before the ordered extent boundary */
};
115
/*
 * Structure to record how many bytes and which ranges are set/cleared
 * by one extent state operation.
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};
126
Qu Wenruo364ecf32017-02-27 15:10:38 +0800127static inline void extent_changeset_init(struct extent_changeset *changeset)
128{
129 changeset->bytes_changed = 0;
130 ulist_init(&changeset->range_changed);
131}
132
133static inline struct extent_changeset *extent_changeset_alloc(void)
134{
135 struct extent_changeset *ret;
136
137 ret = kmalloc(sizeof(*ret), GFP_KERNEL);
138 if (!ret)
139 return NULL;
140
141 extent_changeset_init(ret);
142 return ret;
143}
144
145static inline void extent_changeset_release(struct extent_changeset *changeset)
146{
147 if (!changeset)
148 return;
149 changeset->bytes_changed = 0;
150 ulist_release(&changeset->range_changed);
151}
152
/*
 * Release everything tracked by @changeset and free the structure itself.
 * NULL is tolerated.
 */
static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (changeset) {
		extent_changeset_release(changeset);
		kfree(changeset);
	}
}
160
Li Zefan261507a02010-12-17 14:21:50 +0800161static inline void extent_set_compress_type(unsigned long *bio_flags,
162 int compress_type)
163{
164 *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
165}
166
167static inline int extent_compress_type(unsigned long bio_flags)
168{
169 return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
170}
171
Chris Masond1310b22008-01-24 16:13:08 -0500172struct extent_map_tree;
173
Nikolay Borisovfc4f21b12017-02-20 13:51:06 +0200174typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
Omar Sandoval39b07b52019-12-02 17:34:23 -0800175 struct page *page, size_t pg_offset,
176 u64 start, u64 len);
Chris Masond1310b22008-01-24 16:13:08 -0500177
Nikolay Borisov477a30b2018-04-19 10:46:34 +0300178int try_release_extent_mapping(struct page *page, gfp_t mask);
David Sterbaf7a52a42013-04-26 14:56:29 +0000179int try_release_extent_buffer(struct page *page);
David Sterbacd716d82015-12-03 14:41:30 +0100180
Nikolay Borisovc1be9c12020-09-14 12:37:08 +0300181int __must_check submit_one_bio(struct bio *bio, int mirror_num,
182 unsigned long bio_flags);
Nikolay Borisov0f208812020-09-14 14:39:16 +0300183int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
Qu Wenruo390ed292021-04-14 16:42:15 +0800184 struct btrfs_bio_ctrl *bio_ctrl,
Nikolay Borisov0f208812020-09-14 14:39:16 +0300185 unsigned int read_flags, u64 *prev_em_start);
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +0200186int extent_write_full_page(struct page *page, struct writeback_control *wbc);
Qu Wenruo2bd0fc92021-09-27 15:21:58 +0800187int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
Nikolay Borisov8ae225a2018-04-19 10:46:38 +0300188int extent_writepages(struct address_space *mapping,
Chris Masond1310b22008-01-24 16:13:08 -0500189 struct writeback_control *wbc);
Josef Bacik0b32f4b2012-03-13 09:38:00 -0400190int btree_write_cache_pages(struct address_space *mapping,
191 struct writeback_control *wbc);
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -0700192void extent_readahead(struct readahead_control *rac);
Nikolay Borisovfacee0a2020-08-31 14:42:49 +0300193int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
David Sterbabab16e22020-06-23 20:56:12 +0200194 u64 start, u64 len);
Qu Wenruo32443de2021-01-26 16:34:00 +0800195int set_page_extent_mapped(struct page *page);
196void clear_page_extent_mapped(struct page *page);
Chris Masond1310b22008-01-24 16:13:08 -0500197
Josef Bacikf28491e2013-12-16 13:24:27 -0500198struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
Josef Bacik3fbaf252020-11-05 10:45:20 -0500199 u64 start, u64 owner_root, int level);
Omar Sandoval0f331222015-09-29 20:50:31 -0700200struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
201 u64 start, unsigned long len);
David Sterba3f556f72014-06-15 03:20:26 +0200202struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -0400203 u64 start);
David Sterba2b489662020-04-29 03:04:10 +0200204struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src);
Josef Bacikf28491e2013-12-16 13:24:27 -0500205struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
Chandra Seetharaman452c75c2013-10-07 10:45:25 -0500206 u64 start);
Chris Masond1310b22008-01-24 16:13:08 -0500207void free_extent_buffer(struct extent_buffer *eb);
Josef Bacik3083ee22012-03-09 16:01:49 -0500208void free_extent_buffer_stale(struct extent_buffer *eb);
Arne Jansenbb82ab82011-06-10 14:06:53 +0200209#define WAIT_NONE 0
210#define WAIT_COMPLETE 1
211#define WAIT_PAGE_LOCK 2
Nikolay Borisovc2ccfbc2019-04-10 17:24:40 +0300212int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
David Sterba6af49db2017-06-23 04:09:57 +0200213 int mirror_num);
Josef Bacikfd8b2b62013-04-24 16:41:19 -0400214void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
Josef Bacikbfb484d2020-11-05 10:45:09 -0500215void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
Josef Bacik3fbaf252020-11-05 10:45:20 -0500216 u64 bytenr, u64 owner_root, u64 gen, int level);
Josef Bacikbfb484d2020-11-05 10:45:09 -0500217void btrfs_readahead_node_child(struct extent_buffer *node, int slot);
Robin Dong479ed9a2012-09-29 02:07:47 -0600218
David Sterbacc5e31a2018-03-01 18:20:27 +0100219static inline int num_extent_pages(const struct extent_buffer *eb)
Robin Dong479ed9a2012-09-29 02:07:47 -0600220{
Qu Wenruo4a3dc932020-12-02 14:48:03 +0800221 /*
222 * For sectorsize == PAGE_SIZE case, since nodesize is always aligned to
223 * sectorsize, it's just eb->len >> PAGE_SHIFT.
224 *
225 * For sectorsize < PAGE_SIZE case, we could have nodesize < PAGE_SIZE,
226 * thus have to ensure we get at least one page.
227 */
228 return (eb->len >> PAGE_SHIFT) ?: 1;
Robin Dong479ed9a2012-09-29 02:07:47 -0600229}
230
David Sterba2b489662020-04-29 03:04:10 +0200231static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
Anand Jainba020492018-02-13 12:35:44 +0800232{
233 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
234}
235
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -0600236int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
237 unsigned long start, unsigned long len);
238void read_extent_buffer(const struct extent_buffer *eb, void *dst,
Chris Masond1310b22008-01-24 16:13:08 -0500239 unsigned long start,
240 unsigned long len);
Josef Bacika48b73e2020-08-10 11:42:27 -0400241int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
242 void __user *dst, unsigned long start,
243 unsigned long len);
David Sterba2b489662020-04-29 03:04:10 +0200244void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
245void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
David Sterbaf157bf72016-11-09 17:43:38 +0100246 const void *src);
David Sterba2b489662020-04-29 03:04:10 +0200247void write_extent_buffer(const struct extent_buffer *eb, const void *src,
Chris Masond1310b22008-01-24 16:13:08 -0500248 unsigned long start, unsigned long len);
David Sterba2b489662020-04-29 03:04:10 +0200249void copy_extent_buffer_full(const struct extent_buffer *dst,
250 const struct extent_buffer *src);
251void copy_extent_buffer(const struct extent_buffer *dst,
252 const struct extent_buffer *src,
Chris Masond1310b22008-01-24 16:13:08 -0500253 unsigned long dst_offset, unsigned long src_offset,
254 unsigned long len);
David Sterba2b489662020-04-29 03:04:10 +0200255void memcpy_extent_buffer(const struct extent_buffer *dst,
256 unsigned long dst_offset, unsigned long src_offset,
257 unsigned long len);
258void memmove_extent_buffer(const struct extent_buffer *dst,
259 unsigned long dst_offset, unsigned long src_offset,
David Sterbab159fa22016-11-08 18:09:03 +0100260 unsigned long len);
David Sterba2b489662020-04-29 03:04:10 +0200261void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
262 unsigned long len);
263int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -0700264 unsigned long pos);
David Sterba2b489662020-04-29 03:04:10 +0200265void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -0700266 unsigned long pos, unsigned long len);
David Sterba2b489662020-04-29 03:04:10 +0200267void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
268 unsigned long start, unsigned long pos,
269 unsigned long len);
270void clear_extent_buffer_dirty(const struct extent_buffer *eb);
Liu Boabb57ef2018-09-14 01:44:42 +0800271bool set_extent_buffer_dirty(struct extent_buffer *eb);
David Sterba09c25a82015-12-03 13:08:59 +0100272void set_extent_buffer_uptodate(struct extent_buffer *eb);
David Sterba69ba3922015-12-03 13:08:59 +0100273void clear_extent_buffer_uptodate(struct extent_buffer *eb);
David Sterba2b489662020-04-29 03:04:10 +0200274int extent_buffer_under_io(const struct extent_buffer *eb);
David Sterbabd1fa4f2015-12-03 13:08:59 +0100275void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
David Sterbaf6311572015-12-03 13:08:59 +0100276void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
Nikolay Borisovad7ff172020-06-03 08:55:06 +0300277void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
Nikolay Borisov74e91942019-07-17 16:18:16 +0300278 struct page *locked_page,
Qu Wenruof97e27e2020-11-13 20:51:40 +0800279 u32 bits_to_clear, unsigned long page_ops);
Qu Wenruoc3a3b192021-09-15 15:17:18 +0800280struct bio *btrfs_bio_alloc(unsigned int nr_iovecs);
David Sterba8b6c1d52017-06-02 17:48:13 +0200281struct bio *btrfs_bio_clone(struct bio *bio);
Chaitanya Kulkarni21dda652021-07-21 21:43:33 +0900282struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +0200283
David Sterbab5227c02015-12-03 13:08:59 +0100284void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
David Sterba2b489662020-04-29 03:04:10 +0200285int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num);
Miao Xie2fe63032014-09-12 18:43:59 +0800286
/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the sector is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;		/* page covering the failed range */
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;	/* EXTENT_BIO_* flags of the failed bio */
	int this_mirror;		/* mirror currently being attempted */
	int failed_mirror;		/* mirror that produced the failure */
};
304
Qu Wenruo150e4b02021-05-03 10:08:55 +0800305int btrfs_repair_one_sector(struct inode *inode,
306 struct bio *failed_bio, u32 bio_offset,
307 struct page *page, unsigned int pgoff,
308 u64 start, int failed_mirror,
309 submit_bio_hook_t *submit_bio_hook);
Omar Sandoval77d5d682020-04-16 14:46:25 -0700310
Josef Bacik294e30f2013-10-09 12:00:56 -0400311#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
Goldwyn Rodrigues99780592019-06-21 10:02:54 -0500312bool find_lock_delalloc_range(struct inode *inode,
Johannes Thumshirnce9f9672018-11-19 10:38:17 +0100313 struct page *locked_page, u64 *start,
314 u64 *end);
Chris Mason0d4cf4e2014-10-07 13:24:20 -0700315#endif
Josef Bacikfaa2dbf2014-05-07 17:06:09 -0400316struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -0400317 u64 start);
Josef Bacik3fd63722020-02-14 16:11:40 -0500318
319#ifdef CONFIG_BTRFS_DEBUG
320void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info);
321#else
322#define btrfs_extent_buffer_leak_debug_check(fs_info) do {} while (0)
323#endif
324
Josef Bacik294e30f2013-10-09 12:00:56 -0400325#endif