#ifndef __EXTENTIO__
#define __EXTENTIO__

#include <linux/rbtree.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_WRITEBACK	(1U << 1)
#define EXTENT_UPTODATE		(1U << 2)
#define EXTENT_LOCKED		(1U << 3)
#define EXTENT_NEW		(1U << 4)
#define EXTENT_DELALLOC		(1U << 5)
#define EXTENT_DEFRAG		(1U << 6)
#define EXTENT_BOUNDARY		(1U << 9)
#define EXTENT_NODATASUM	(1U << 10)
#define EXTENT_CLEAR_META_RESV	(1U << 11)
#define EXTENT_FIRST_DELALLOC	(1U << 12)
#define EXTENT_NEED_WAIT	(1U << 13)
#define EXTENT_DAMAGED		(1U << 14)
#define EXTENT_NORESERVE	(1U << 15)
#define EXTENT_QGROUP_RESERVED	(1U << 16)
#define EXTENT_CLEAR_DATA_RESV	(1U << 17)
#define EXTENT_DELALLOC_NEW	(1U << 18)
#define EXTENT_IOBITS		(EXTENT_LOCKED | EXTENT_WRITEBACK)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED	1
#define EXTENT_BIO_TREE_LOG	2
#define EXTENT_BIO_FLAG_SHIFT	16
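
/*
 * Layout note: bits below EXTENT_BIO_FLAG_SHIFT carry the EXTENT_BIO_*
 * flags themselves and everything above carries the compression type;
 * see the extent_set_compress_type()/extent_compress_type() helpers
 * further down.
 */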

/* these are bit numbers for test/set bit */
#define EXTENT_BUFFER_UPTODATE 0
#define EXTENT_BUFFER_DIRTY 2
#define EXTENT_BUFFER_CORRUPT 3
#define EXTENT_BUFFER_READAHEAD 4	/* this got triggered by readahead */
#define EXTENT_BUFFER_TREE_REF 5
#define EXTENT_BUFFER_STALE 6
#define EXTENT_BUFFER_WRITEBACK 7
#define EXTENT_BUFFER_READ_ERR 8	/* read IO error */
#define EXTENT_BUFFER_DUMMY 9
#define EXTENT_BUFFER_IN_TREE 10
#define EXTENT_BUFFER_WRITE_ERR 11	/* write IO error */

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

static inline int le_test_bit(int nr, const u8 *addr)
{
	return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE-1)));
}
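
/*
 * Worked examples for the helpers above: BITMAP_FIRST_BYTE_MASK(3) is
 * (0xff << 3) & 0xff == 0xf8, i.e. bits 3-7 of the first byte touched by
 * a range, and BITMAP_LAST_BYTE_MASK(11) is 0xff >> (-11 & 7) == 0x07,
 * i.e. the 11 % 8 == 3 bits that spill into the last byte. Likewise,
 * le_test_bit(10, map) reads bit 2 of map[1].
 */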

extern void le_bitmap_set(u8 *map, unsigned int start, int len);
extern void le_bitmap_clear(u8 *map, unsigned int start, int len);

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
				       int mirror_num, unsigned long bio_flags,
				       u64 bio_offset);
struct extent_io_ops {
	/*
	 * The following callbacks must always be defined; the function
	 * pointer will be called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
	int (*merge_bio_hook)(struct page *page, unsigned long offset,
			      size_t size, struct bio *bio,
			      unsigned long bio_flags);
	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);

	/*
	 * Optional hooks, called if the pointer is not NULL
	 */
	int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
			     u64 start, u64 end, int *page_started,
			     unsigned long *nr_written);

	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
	void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
				      struct extent_state *state, int uptodate);
	void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
			     unsigned *bits);
	void (*clear_bit_hook)(struct btrfs_inode *inode,
			       struct extent_state *state,
			       unsigned *bits);
	void (*merge_extent_hook)(struct inode *inode,
				  struct extent_state *new,
				  struct extent_state *other);
	void (*split_extent_hook)(struct inode *inode,
				  struct extent_state *orig, u64 split);
};

struct extent_io_tree {
	struct rb_root state;
	struct address_space *mapping;
	u64 dirty_bytes;
	int track_uptodate;
	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	atomic_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	/* counts of lock holders and waiters on the extent buffer */
	atomic_t write_locks;
	atomic_t read_locks;
	atomic_t blocking_writers;
	atomic_t blocking_readers;
	atomic_t spinning_readers;
	atomic_t spinning_writers;
	short lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use write_lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	u64 bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};
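
/*
 * A sketch of typical extent_changeset usage with the *_record_extent_bits()
 * helpers declared below, following the pattern of the qgroup reservation
 * code (exact init/release helper names are those of ulist.h):
 *
 *	struct extent_changeset changeset;
 *
 *	changeset.bytes_changed = 0;
 *	ulist_init(&changeset.range_changed);
 *	ret = set_record_extent_bits(tree, start, start + len - 1,
 *				     EXTENT_QGROUP_RESERVED, &changeset);
 *	...
 *	ulist_release(&changeset.range_changed);
 */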

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
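
/*
 * Round-trip sketch for the two helpers above, with BTRFS_COMPRESS_ZLIB
 * standing in for any compression type:
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	type = extent_compress_type(bio_flags);
 *
 * after which type == BTRFS_COMPRESS_ZLIB again.
 */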

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping);
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}
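
/*
 * A minimal sketch of the range-locking pattern built from the helpers
 * above (end is inclusive; tree is typically &BTRFS_I(inode)->io_tree):
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	... operate on [start, end] ...
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 *
 * Passing the cached state back to unlock avoids a second rbtree search.
 */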

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
				GFP_NOFS);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc);
int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
			      u64 start, u64 end, get_extent_t *get_extent,
			      int mode);
int extent_writepages(struct extent_io_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct extent_io_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len, get_extent_t *get_extent);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     get_extent_t *get_extent, int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(start >> PAGE_SHIFT);
}
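
/*
 * Worked example with 4K pages: num_extent_pages(6144, 8192) is
 * ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) == 4 - 1 == 3, since an 8K
 * range that starts halfway into a page straddles three pages.
 */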

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len);
void read_extent_buffer(struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
			       unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
			      unsigned long min_len, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  u64 delalloc_end, struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		gfp_t gfp_flags);
struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
		      u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_inode *inode, u64 start,
		     struct page *page, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
			   struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec);
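
/*
 * A rough sketch of how the read-repair pieces above fit together; the
 * real flow lives in the readpage end_io paths of extent_io.c:
 *
 *	btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror)) {
 *		bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 *					      pg_offset, icsum, endio_func, data);
 *		... submit bio to mirror failrec->this_mirror ...
 *	} else {
 *		free_io_failure(BTRFS_I(inode), failrec);
 *	}
 */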
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
noinline u64 find_lock_delalloc_range(struct inode *inode,
				      struct extent_io_tree *tree,
				      struct page *locked_page, u64 *start,
				      u64 *end, u64 max_bytes);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);
#endif