/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* Bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Flags for bio submission. The high bits indicate the compression
 * type for this bio.
 */
#define EXTENT_BIO_COMPRESSED	1
#define EXTENT_BIO_FLAG_SHIFT	16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* This read was triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* These are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
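
/*
 * Worked example (illustrative): for a bitmap range covering bits
 * 3..10, the partial bytes at either end are masked as:
 *
 *	BITMAP_FIRST_BYTE_MASK(3)  == 0xf8  (keep bits 3..7 of byte 0)
 *	BITMAP_LAST_BYTE_MASK(11)  == 0x07  (keep bits 0..2 of byte 1)
 *	BIT_BYTE(10)               == 1     (the last bit is in byte 1)
 *
 * extent_buffer_bitmap_set() and extent_buffer_bitmap_clear() combine
 * such masks so that a range never needs word-aligned access.
 */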

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
					int mirror_num, unsigned long bio_flags,
					u64 bio_offset);

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined, the function
	 * pointer will be called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	atomic_t blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/*
	 * Readers use write_lock_wq while they wait for the write
	 * lock holders to unlock.
	 */
	wait_queue_head_t write_lock_wq;

	/*
	 * Writers use read_lock_wq while they wait for readers
	 * to unlock.
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	atomic_t spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	atomic_t write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
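
/*
 * Typical changeset lifecycle, as a minimal sketch (error handling of
 * the record call itself is up to the caller):
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	if (!changeset)
 *		return -ENOMEM;
 *	set_record_extent_bits(tree, start, end, EXTENT_QGROUP_RESERVED,
 *			       changeset);
 *	(changeset->bytes_changed now holds how many bytes were newly set)
 *	extent_changeset_free(changeset);
 *
 * extent_changeset_release() can be used instead of the final free to
 * reuse the same changeset across several calls.
 */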

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
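
/*
 * Example (illustrative only): the low bits of bio_flags carry plain
 * flags such as EXTENT_BIO_COMPRESSED, while the bits at and above
 * EXTENT_BIO_FLAG_SHIFT carry the compression type:
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	...
 *	if (extent_compress_type(bio_flags) == BTRFS_COMPRESS_ZLIB)
 *		(decompress with zlib)
 */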

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
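
/*
 * A minimal setup sketch (assuming an inode-owned tree; other owners
 * pass a different IO_TREE_* value and private_data):
 *
 *	extent_io_tree_init(fs_info, &inode->io_tree, IO_TREE_INODE_IO,
 *			    &inode->vfs_inode);
 *
 * The owner value only identifies which tree this is (see the
 * IO_TREE_* enum above), e.g. for debugging.
 */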
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}
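
/*
 * Locking sketch (illustrative): extent ranges use inclusive end
 * offsets, and a cached extent_state can be carried from the lock to
 * the unlock to avoid a second tree search:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	(operate on the range [start, end])
 *	unlock_extent_cached(tree, start, end, &cached);
 */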

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
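
/*
 * Worked example (illustrative, assuming 4K pages): a buffer at start
 * 0x3800 with len 0x4000 straddles five pages, one more than its
 * length alone suggests:
 *
 *	round_up(0x3800 + 0x4000, 0x1000) >> PAGE_SHIFT  ==  8
 *	0x3800 >> PAGE_SHIFT                             ==  3
 *	num_extent_pages()                               ==  5
 */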

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  u64 delalloc_end, struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};
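
/*
 * Rough retry flow (an illustrative sketch; the real logic lives in
 * extent_io.c):
 *
 *	failrec->failed_mirror = mirror that returned bad data;
 *	failrec->this_mirror = next mirror to try;
 *	if (btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				   failed_mirror))
 *		(build and submit a repair bio via btrfs_create_repair_bio)
 *	else
 *		(fail the original bio through its end_io callback)
 */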

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			      struct page *locked_page, u64 *start,
			      u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif