/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/* Redefined bits above which are used only in the device allocation tree */
#define CHUNK_ALLOCATED		EXTENT_DIRTY

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED	1
#define EXTENT_BIO_FLAG_SHIFT	16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
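
/*
 * Worked example for the byte-granularity helpers above (illustrative only,
 * values computed by hand with BITS_PER_BYTE == 8):
 *
 *	BIT_BYTE(10)              == 1    (bit 10 lives in byte 1)
 *	BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (keeps bits 3..7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(11) == 0x07 (keeps bits 0..2 of the last byte)
 */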

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
				       int mirror_num, unsigned long bio_flags,
				       u64 bio_offset);

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must be always defined, the function
	 * pointer will be called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	atomic_t blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	atomic_t spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	atomic_t write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
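
/*
 * Typical changeset lifecycle (illustrative sketch; e.g. the qgroup data
 * reservation path follows roughly this pattern):
 *
 *	struct extent_changeset *cs = extent_changeset_alloc();
 *
 *	if (!cs)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end,
 *				     EXTENT_QGROUP_RESERVED, cs);
 *	... cs->bytes_changed and cs->range_changed now describe which
 *	    bytes the call actually flipped ...
 *	extent_changeset_free(cs);
 */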

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
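
/*
 * Sketch of how the bio flag helpers are meant to be used (illustrative
 * only; BTRFS_COMPRESS_ZLIB comes from compression.h):
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	...
 *	if (bio_flags & EXTENT_BIO_COMPRESSED)
 *		type = extent_compress_type(bio_flags);
 */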

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					   struct page *page,
					   size_t pg_offset,
					   u64 start, u64 len,
					   int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
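
/*
 * Example initialization (sketch only; the inode I/O tree is set up along
 * these lines, with the inode itself as private_data):
 *
 *	extent_io_tree_init(fs_info, &inode->io_tree, IO_TREE_INODE_IO, inode);
 *
 * The owner value should be one of the IO_TREE_* constants above and
 * identifies which tree this is (see the owner field in extent_io_tree);
 * private_data is stored in the tree and handed back to the callbacks.
 */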
void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}
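
/*
 * Illustrative locking pattern (sketch only): lock a byte range, do the
 * work, then unlock it, optionally keeping a cached extent_state so the
 * unlock does not have to search the tree again. Ranges are inclusive,
 * so end is typically start + len - 1.
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	...
 *	unlock_extent_cached(tree, start, end, &cached);
 */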

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
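
/*
 * Worked example for num_extent_pages() (illustrative, assuming 4K pages):
 * an eb with start == 16K and len == 16K covers pages 4..7, since
 * round_up(32K, 4K) >> PAGE_SHIFT == 8 and 16K >> PAGE_SHIFT == 4,
 * so the helper returns 4.
 */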

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  u64 delalloc_end, struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};
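
/*
 * Rough shape of the read-repair path that consumes this record (sketch
 * only, condensed from the helpers declared below; the real logic lives
 * in extent_io.c):
 *
 *	struct io_failure_record *failrec;
 *
 *	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (!ret && btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *					   failed_mirror)) {
 *		bio = btrfs_create_repair_bio(inode, failed_bio, failrec,
 *					      page, pg_offset, icsum,
 *					      endio_func, data);
 *		... resubmit bio to failrec->this_mirror ...
 *	} else {
 *		free_io_failure(failure_tree, io_tree, failrec);
 *	}
 */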

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			      struct page *locked_page, u64 *start,
			      u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif