/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Redefinitions of bits above, used only in the device allocation tree.
 * They must not reuse EXTENT_LOCKED, EXTENT_BOUNDARY, EXTENT_CLEAR_META_RESV
 * or EXTENT_CLEAR_DATA_RESV, because those bits have special meaning to the
 * bit manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG

/*
 * Flags for bio submission. The high bits indicate the compression
 * type for this bio.
 */
#define EXTENT_BIO_COMPRESSED	1
#define EXTENT_BIO_FLAG_SHIFT	16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
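
/*
 * Worked example, for illustration only: with BITS_PER_BYTE == 8,
 *   BITMAP_FIRST_BYTE_MASK(3) == (0xff << 3) & 0xff == 0xf8, i.e. bits 3..7
 *   of the first byte of a range starting at bit 3, and
 *   BITMAP_LAST_BYTE_MASK(11) == 0xff >> (-11 & 7) == 0xff >> 5 == 0x07,
 *   i.e. bits 0..2 of the last byte of a range that is 11 bits long.
 */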

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined, the function
	 * pointer will be called unconditionally.
	 */
	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
					int mirror_num, unsigned long bio_flags,
					u64 bio_offset);
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	atomic_t blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/*
	 * readers use write_lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/*
	 * writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	atomic_t spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	atomic_t write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
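
/*
 * Typical lifecycle, a sketch only (assumes the ulist iterator helpers from
 * ulist.h and set_record_extent_bits() declared further below; error
 * handling elided):
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *	struct ulist_iterator uiter;
 *	struct ulist_node *unode;
 *
 *	set_record_extent_bits(tree, start, end, EXTENT_QGROUP_RESERVED,
 *			       changeset);
 *	ULIST_ITER_INIT(&uiter);
 *	while ((unode = ulist_next(&changeset->range_changed, &uiter)))
 *		handle_range(unode->val, unode->aux);
 *	extent_changeset_free(changeset);
 *
 * where handle_range() is a hypothetical consumer of one changed range.
 */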

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
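
/*
 * Example, for illustration only (BTRFS_COMPRESS_ZLIB is assumed from
 * compression.h, it is not declared in this file):
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *
 * after which extent_compress_type(bio_flags) returns BTRFS_COMPRESS_ZLIB
 * while the low bits still carry EXTENT_BIO_COMPRESSED.
 */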

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
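
/*
 * Sketch of a typical setup call, assuming a caller that owns an inode io
 * tree (the exact call sites live in inode.c and are not part of this
 * header):
 *
 *	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
 *			    IO_TREE_INODE_IO, inode);
 */
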
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}
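
/*
 * The usual pattern around a locked range operation (a sketch; the cached
 * state is optional and only saves the rb-tree lookup on unlock):
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	... operate on the range [start, end], both inclusive ...
 *	unlock_extent_cached(tree, start, end, &cached);
 */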

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
					      u64 start, u64 end,
					      struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
				  u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
					u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
				   u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
				 u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
				      u64 end, struct extent_state **cached_state,
				      gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		  __u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
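
/*
 * Worked example, for illustration: with 4K pages, an extent buffer with
 * start == 4096 and len == 16384 covers pages 1-4, so
 * num_extent_pages() == (round_up(20480, 4096) >> 12) - (4096 >> 12)
 *		      == 5 - 1 == 4.
 */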

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
					 const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  u64 delalloc_end, struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);
519/*
520 * When IO fails, either with EIO or csum verification fails, we
521 * try other mirrors that might have a good copy of the data. This
522 * io_failure_record is used to record state as we go through all the
523 * mirrors. If another mirror has good data, the page is set up to date
524 * and things continue. If a good mirror can't be found, the original
525 * bio end_io callback is called to indicate things have failed.
526 */
527struct io_failure_record {
528 struct page *page;
529 u64 start;
530 u64 len;
531 u64 logical;
532 unsigned long bio_flags;
533 int this_mirror;
534 int failed_mirror;
535 int in_validation;
536};
537
Nikolay Borisov4ac1f4a2017-02-20 13:50:52 +0200538
Nikolay Borisov7ab79562017-02-20 13:50:57 +0200539void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
540 u64 end);
Miao Xie2fe63032014-09-12 18:43:59 +0800541int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
542 struct io_failure_record **failrec_ret);
Ming Leia0b60d72017-12-18 20:22:11 +0800543bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
Liu Boc3cfb652017-07-13 15:00:50 -0700544 struct io_failure_record *failrec, int fail_mirror);
Miao Xie2fe63032014-09-12 18:43:59 +0800545struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
546 struct io_failure_record *failrec,
547 struct page *page, int pg_offset, int icsum,
Miao Xie8b110e32014-09-12 18:44:03 +0800548 bio_end_io_t *endio_func, void *data);
Josef Bacik7870d082017-05-05 11:57:15 -0400549int free_io_failure(struct extent_io_tree *failure_tree,
550 struct extent_io_tree *io_tree,
551 struct io_failure_record *rec);
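
/*
 * Sketch of how the helpers above combine in the read-repair path
 * (illustrative only; the real flow lives in the read endio code in
 * extent_io.c, and the bio submission step is elided):
 *
 *	struct io_failure_record *failrec;
 *	struct bio *repair_bio;
 *
 *	if (btrfs_get_io_failure_record(inode, start, end, &failrec))
 *		return -EIO;
 *	if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				    failed_mirror))
 *		return -EIO;	(no usable mirror is left)
 *	repair_bio = btrfs_create_repair_bio(inode, failed_bio, failrec,
 *					     page, pg_offset, icsum,
 *					     endio_func, data);
 *	(submit repair_bio against failrec->this_mirror)
 */
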
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
			      struct page *locked_page, u64 *start,
			      u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif /* BTRFS_EXTENT_IO_H */