// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(leak_lock);

static inline
void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_del(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)	\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	if (tree->ops && tree->ops->check_extent_io_range)
		tree->ops->check_extent_io_range(tree->private_data, caller,
						 start, end);
}
#else
#define btrfs_leak_debug_add(new, head)	do {} while (0)
#define btrfs_leak_debug_del(entry)	do {} while (0)
#define btrfs_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use REQ_SYNC */
	unsigned int sync_io:1;
};

static void add_extent_changeset(struct extent_state *state, unsigned bits,
				 struct extent_changeset *changeset,
				 int set)
{
	int ret;

	if (!changeset)
		return;
	if (set && (state->state & bits) == bits)
		return;
	if (!set && (state->state & bits) == 0)
		return;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	/* ENOMEM */
	BUG_ON(ret < 0);
}

static void flush_write_bio(struct extent_page_data *epd);

static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	if (tree->ops)
		return tree->ops->tree_fs_info(tree->private_data);
	return NULL;
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;

	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
				     offsetof(struct btrfs_io_bio, bio),
				     BIOSET_NEED_BVECS);
	if (!btrfs_bioset)
		goto free_buffer_cache;

	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_free(btrfs_bioset);
	btrfs_bioset = NULL;

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	extent_state_cache = NULL;
	return -ENOMEM;
}

void __cold extent_io_exit(void)
{
	btrfs_leak_debug_check();

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_state_cache);
	kmem_cache_destroy(extent_buffer_cache);
	if (btrfs_bioset)
		bioset_free(btrfs_bioset);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 void *private_data)
{
	tree->state = RB_ROOT;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
}

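/*
 * Usage sketch (illustrative, not part of the original file): an io tree is
 * embedded in a longer-lived object and initialized once.  private_data is
 * handed back to the tree->ops callbacks, e.g.:
 *
 *	struct extent_io_tree io_tree;
 *
 *	extent_io_tree_init(&io_tree, inode);	// 'inode' is a hypothetical owner
 */
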
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might not be appropriate for the slab allocator,
	 * drop the unsupported bits.
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&state->leak_list, &states);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

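/*
 * Refcount sketch (illustrative, not part of the original file): an
 * extent_state can be referenced by the tree and by cached pointers at the
 * same time, so helpers that hand one back take an extra reference that the
 * caller drops with free_extent_state():
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);	// caches + refs the state
 *	...
 *	unlock_extent_cached(tree, start, end, &cached);
 *
 * lock_extent_bits()/unlock_extent_cached() are declared in extent_io.h;
 * their exact signatures are an assumption here.
 */
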
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
	if (!ret)
		return prev;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->private_data, new, other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS or EXTENT_BOUNDARY in
 * their state field are not merged because the end_io handlers need to be
 * able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

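/*
 * Merge sketch (illustrative, not part of the original file): after a bit
 * update, two adjacent states whose bits became identical collapse into a
 * single record, e.g.
 *
 *	[0, 4095] EXTENT_DIRTY  +  [4096, 8191] EXTENT_DIRTY
 *		-> [0, 8191] EXTENT_DIRTY
 *
 * while a neighbour holding EXTENT_IOBITS (e.g. EXTENT_LOCKED) is skipped,
 * so end_io handlers can keep operating on it without allocations.
 */
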
static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, unsigned *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->private_data, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->private_data, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			unsigned *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
		     end, start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->private_data, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

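/*
 * Split sketch (illustrative, not part of the original file): changing bits
 * in the middle of an existing state takes two splits.  Starting from
 *
 *	orig: [0, 16383] EXTENT_DELALLOC
 *
 * split_state(tree, orig, prealloc, 4096) leaves
 *
 *	prealloc: [0, 4095]	orig: [4096, 16383]
 *
 * and a second split at 8192 isolates [4096, 8191] so only that range has
 * its bits changed; merge_state() later re-joins neighbours whose bits end
 * up identical again.
 */
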
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	add_extent_changeset(state, bits_to_clear, changeset, 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

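/*
 * Iteration sketch (illustrative, not part of the original file):
 * clear_state_bit() returns the next state so the clearing loop can walk
 * forward without re-searching the tree, roughly:
 *
 *	while (state && state->start <= end)
 *		state = clear_state_bit(tree, state, &bits, wake, NULL);
 *
 * This is a simplification; the real caller below also splits states at
 * the range boundaries and periodically drops the tree lock.
 */
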
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err,
		    "Locking error: Extent tree was modified by another thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care about allocation failure here because we might
		 * end up not needing the pre-allocated extent state at all,
		 * which is the case if the only extent states in the tree
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it
		 * later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

}

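/*
 * Usage sketch (illustrative, not part of the original file): most callers
 * go through the clear_extent_bit() wrapper defined later in this file,
 * e.g. dropping dirty/delalloc state for a range while reusing a cached
 * state:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC, 1, 0, &cached);
 *
 * wake == 1 kicks any sleepers on the range, delete == 0 leaves unrelated
 * bits in place.
 */
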
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

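/*
 * Waiting sketch (illustrative, not part of the original file): the extra
 * reference taken around wait_on_state() is what keeps the state alive
 * while the tree lock is dropped inside schedule():
 *
 *	refcount_inc(&state->refs);	// pin across the sleep
 *	wait_on_state(tree, state);	// drops and retakes tree->lock
 *	free_extent_state(state);	// unpin; the state may now be freed
 *
 * After waking, the search restarts from 'start' because the tree may have
 * changed arbitrarily while unlocked.
 */
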
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned *bits, struct extent_changeset *changeset)
{
	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	add_extent_changeset(state, bits_to_set, changeset, 1);
	state->state |= bits_to_set;
}

static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_IOBITS | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned bits, unsigned exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care about allocation failure here because we might
		 * end up not needing the pre-allocated extent state at all,
		 * which is the case if the only extent states in the tree
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it
		 * later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 * or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 * failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask, NULL);
}

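/*
 * Usage sketch (illustrative, not part of the original file): setting a
 * plain bit over a range with a cached state:
 *
 *	set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, &cached,
 *		       GFP_NOFS);
 *
 * With an exclusive bit such as EXTENT_LOCKED, __set_extent_bit() returns
 * -EEXIST and reports the conflicting offset through failed_start; the
 * locking helpers in extent_io.h are assumed to loop on that until they
 * own the whole range.
 */
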
/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits, NULL);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 * or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, NULL);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0,
						NULL);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, NULL);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	cond_resched();
	first_iteration = false;
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}

Chris Masond1310b22008-01-24 16:13:08 -05001280/* wrappers around set/clear extent bit */
Qu Wenruod38ed272015-10-12 14:53:37 +08001281int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba2c53b912016-04-26 23:54:39 +02001282 unsigned bits, struct extent_changeset *changeset)
Qu Wenruod38ed272015-10-12 14:53:37 +08001283{
1284 /*
1285 * We don't support EXTENT_LOCKED yet, as the current changeset will
1286 * record any bits changed, so in the EXTENT_LOCKED case it would
1287 * either fail with -EEXIST or the changeset would record the whole
1288 * range.
1289 */
1290 BUG_ON(bits & EXTENT_LOCKED);
1291
David Sterba2c53b912016-04-26 23:54:39 +02001292 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
Qu Wenruod38ed272015-10-12 14:53:37 +08001293 changeset);
1294}
1295
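/*
 * Sketch of a hypothetical caller (the helper name is made up): the
 * changeset variant is for callers that need to know exactly which
 * subranges actually flipped, e.g. for space accounting.  The changeset
 * itself is assumed to have been initialized by the caller.
 */
static inline int example_set_and_record(struct extent_io_tree *tree,
					 u64 start, u64 end,
					 struct extent_changeset *changeset)
{
	/*
	 * Any bit except EXTENT_LOCKED is allowed here; only the bytes
	 * that were not already EXTENT_DELALLOC get recorded.
	 */
	return set_record_extent_bits(tree, start, end, EXTENT_DELALLOC,
				      changeset);
}
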
Qu Wenruofefdc552015-10-12 15:35:38 +08001296int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1297 unsigned bits, int wake, int delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001298 struct extent_state **cached)
Qu Wenruofefdc552015-10-12 15:35:38 +08001299{
1300 return __clear_extent_bit(tree, start, end, bits, wake, delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001301 cached, GFP_NOFS, NULL);
Qu Wenruofefdc552015-10-12 15:35:38 +08001302}
1303
Qu Wenruofefdc552015-10-12 15:35:38 +08001304int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaf734c442016-04-26 23:54:39 +02001305 unsigned bits, struct extent_changeset *changeset)
Qu Wenruofefdc552015-10-12 15:35:38 +08001306{
1307 /*
1308 * The EXTENT_LOCKED case is not supported, for the same reason as in
1309 * set_record_extent_bits().
1310 */
1311 BUG_ON(bits & EXTENT_LOCKED);
1312
David Sterbaf734c442016-04-26 23:54:39 +02001313 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
Qu Wenruofefdc552015-10-12 15:35:38 +08001314 changeset);
1315}
1316
Chris Masond352ac62008-09-29 15:18:18 -04001317/*
1318 * either insert or lock the state struct between start and end; if the
1319 * range is already locked, wait for it to be unlocked and retry.
1320 */
Chris Mason1edbb732009-09-02 13:24:36 -04001321int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaff13db42015-12-03 14:30:40 +01001322 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001323{
1324 int err;
1325 u64 failed_start;
David Sterba9ee49a042015-01-14 19:52:13 +01001326
Chris Masond1310b22008-01-24 16:13:08 -05001327 while (1) {
David Sterbaff13db42015-12-03 14:30:40 +01001328 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001329 EXTENT_LOCKED, &failed_start,
Qu Wenruod38ed272015-10-12 14:53:37 +08001330 cached_state, GFP_NOFS, NULL);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001331 if (err == -EEXIST) {
Chris Masond1310b22008-01-24 16:13:08 -05001332 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1333 start = failed_start;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001334 } else
Chris Masond1310b22008-01-24 16:13:08 -05001335 break;
Chris Masond1310b22008-01-24 16:13:08 -05001336 WARN_ON(start > end);
1337 }
1338 return err;
1339}
Chris Masond1310b22008-01-24 16:13:08 -05001340
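/*
 * Sketch of a hypothetical caller showing the usual lock/operate/unlock
 * pattern.  Handing the cached state back to unlock_extent_cached() spares
 * the unlock a second rbtree search.
 */
static inline void example_locked_range_op(struct extent_io_tree *tree,
					   u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent_bits(tree, start, end, &cached);
	/* ... inspect or modify whatever lives in [start, end] ... */
	unlock_extent_cached(tree, start, end, &cached);
}
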
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001341int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001342{
1343 int err;
1344 u64 failed_start;
1345
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001346 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
Qu Wenruod38ed272015-10-12 14:53:37 +08001347 &failed_start, NULL, GFP_NOFS, NULL);
Yan Zheng66435582008-10-30 14:19:50 -04001348 if (err == -EEXIST) {
1349 if (failed_start > start)
1350 clear_extent_bit(tree, start, failed_start - 1,
David Sterbaae0f1622017-10-31 16:37:52 +01001351 EXTENT_LOCKED, 1, 0, NULL);
Josef Bacik25179202008-10-29 14:49:05 -04001352 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001353 }
Josef Bacik25179202008-10-29 14:49:05 -04001354 return 1;
1355}
Josef Bacik25179202008-10-29 14:49:05 -04001356
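/*
 * Sketch of a hypothetical non-blocking caller: try_lock_extent() returns 1
 * on success and 0 if any part of the range was already locked (a partial
 * lock is rolled back before returning).  The -EAGAIN policy is made up.
 */
static inline int example_nonblocking_op(struct extent_io_tree *tree,
					 u64 start, u64 end)
{
	if (!try_lock_extent(tree, start, end))
		return -EAGAIN;	/* someone else holds part of the range */
	/* ... fast-path work on the locked range ... */
	unlock_extent(tree, start, end);
	return 0;
}
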
David Sterbabd1fa4f2015-12-03 13:08:59 +01001357void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001358{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001359 unsigned long index = start >> PAGE_SHIFT;
1360 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001361 struct page *page;
1362
1363 while (index <= end_index) {
1364 page = find_get_page(inode->i_mapping, index);
1365 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1366 clear_page_dirty_for_io(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001367 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001368 index++;
1369 }
Chris Mason4adaa612013-03-26 13:07:00 -04001370}
1371
David Sterbaf6311572015-12-03 13:08:59 +01001372void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001373{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001374 unsigned long index = start >> PAGE_SHIFT;
1375 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001376 struct page *page;
1377
1378 while (index <= end_index) {
1379 page = find_get_page(inode->i_mapping, index);
1380 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Mason4adaa612013-03-26 13:07:00 -04001381 __set_page_dirty_nobuffers(page);
Konstantin Khebnikov8d386332015-02-11 15:26:55 -08001382 account_page_redirty(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001383 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001384 index++;
1385 }
Chris Mason4adaa612013-03-26 13:07:00 -04001386}
1387
Chris Masond1310b22008-01-24 16:13:08 -05001388/*
Chris Masond1310b22008-01-24 16:13:08 -05001389 * helper function to mark both the pages and the extents in the tree as writeback
1390 */
David Sterba35de6db2015-12-03 13:08:59 +01001391static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
Chris Masond1310b22008-01-24 16:13:08 -05001392{
Josef Bacikc6100a42017-05-05 11:57:13 -04001393 tree->ops->set_range_writeback(tree->private_data, start, end);
Chris Masond1310b22008-01-24 16:13:08 -05001394}
Chris Masond1310b22008-01-24 16:13:08 -05001395
Chris Masond352ac62008-09-29 15:18:18 -04001396/* find the first state struct with 'bits' set after 'start', and
1397 * return it. tree->lock must be held. NULL will be returned if
1398 * nothing was found after 'start'
1399 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00001400static struct extent_state *
1401find_first_extent_bit_state(struct extent_io_tree *tree,
David Sterba9ee49a042015-01-14 19:52:13 +01001402 u64 start, unsigned bits)
Chris Masond7fc6402008-02-18 12:12:38 -05001403{
1404 struct rb_node *node;
1405 struct extent_state *state;
1406
1407 /*
1408 * this search will find all the extents that end after
1409 * our range starts.
1410 */
1411 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001412 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001413 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001414
Chris Masond3977122009-01-05 21:25:51 -05001415 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001416 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001417 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001418 return state;
Chris Masond3977122009-01-05 21:25:51 -05001419
Chris Masond7fc6402008-02-18 12:12:38 -05001420 node = rb_next(node);
1421 if (!node)
1422 break;
1423 }
1424out:
1425 return NULL;
1426}
Chris Masond7fc6402008-02-18 12:12:38 -05001427
Chris Masond352ac62008-09-29 15:18:18 -04001428/*
Xiao Guangrong69261c42011-07-14 03:19:45 +00001429 * find the first offset in the io tree with 'bits' set. zero is
1430 * returned if we find something, and *start_ret and *end_ret are
1431 * set to reflect the state struct that was found.
1432 *
Wang Sheng-Hui477d7ea2012-04-06 14:35:47 +08001433 * If nothing was found, 1 is returned.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001434 */
1435int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
David Sterba9ee49a042015-01-14 19:52:13 +01001436 u64 *start_ret, u64 *end_ret, unsigned bits,
Josef Bacike6138872012-09-27 17:07:30 -04001437 struct extent_state **cached_state)
Xiao Guangrong69261c42011-07-14 03:19:45 +00001438{
1439 struct extent_state *state;
Josef Bacike6138872012-09-27 17:07:30 -04001440 struct rb_node *n;
Xiao Guangrong69261c42011-07-14 03:19:45 +00001441 int ret = 1;
1442
1443 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001444 if (cached_state && *cached_state) {
1445 state = *cached_state;
Filipe Manana27a35072014-07-06 20:09:59 +01001446 if (state->end == start - 1 && extent_state_in_tree(state)) {
Josef Bacike6138872012-09-27 17:07:30 -04001447 n = rb_next(&state->rb_node);
1448 while (n) {
1449 state = rb_entry(n, struct extent_state,
1450 rb_node);
1451 if (state->state & bits)
1452 goto got_it;
1453 n = rb_next(n);
1454 }
1455 free_extent_state(*cached_state);
1456 *cached_state = NULL;
1457 goto out;
1458 }
1459 free_extent_state(*cached_state);
1460 *cached_state = NULL;
1461 }
1462
Xiao Guangrong69261c42011-07-14 03:19:45 +00001463 state = find_first_extent_bit_state(tree, start, bits);
Josef Bacike6138872012-09-27 17:07:30 -04001464got_it:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001465 if (state) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +01001466 cache_state_if_flags(state, cached_state, 0);
Xiao Guangrong69261c42011-07-14 03:19:45 +00001467 *start_ret = state->start;
1468 *end_ret = state->end;
1469 ret = 0;
1470 }
Josef Bacike6138872012-09-27 17:07:30 -04001471out:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001472 spin_unlock(&tree->lock);
1473 return ret;
1474}
1475
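/*
 * Sketch of a hypothetical walker built on find_first_extent_bit(): every
 * range carrying 'bits' is visited by restarting the search just past the
 * previous hit, and the cached state lets adjacent hits skip the rbtree
 * walk entirely.
 */
static inline void example_walk_ranges(struct extent_io_tree *tree,
				       unsigned bits)
{
	struct extent_state *cached = NULL;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(tree, start, &start, &end, bits,
				     &cached) == 0) {
		/* ... process [start, end] ... */
		if (end == (u64)-1)
			break;
		start = end + 1;
	}
	free_extent_state(cached);	/* safe if NULL, drops the ref otherwise */
}
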
1476/*
Chris Masond352ac62008-09-29 15:18:18 -04001477 * find a contiguous range of bytes in the file marked as delalloc, not
1478 * more than 'max_bytes'. 'start' and 'end' are used to return the range.
1479 *
1480 * 1 is returned if we find something, 0 if nothing was in the tree
1481 */
Chris Masonc8b97812008-10-29 14:49:59 -04001482static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001483 u64 *start, u64 *end, u64 max_bytes,
1484 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001485{
1486 struct rb_node *node;
1487 struct extent_state *state;
1488 u64 cur_start = *start;
1489 u64 found = 0;
1490 u64 total_bytes = 0;
1491
Chris Masoncad321a2008-12-17 14:51:42 -05001492 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001493
Chris Masond1310b22008-01-24 16:13:08 -05001494 /*
1495 * this search will find all the extents that end after
1496 * our range starts.
1497 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001498 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001499 if (!node) {
Chris Mason3b951512008-04-17 11:29:12 -04001500 if (!found)
1501 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001502 goto out;
1503 }
1504
Chris Masond3977122009-01-05 21:25:51 -05001505 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001506 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001507 if (found && (state->start != cur_start ||
1508 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001509 goto out;
1510 }
1511 if (!(state->state & EXTENT_DELALLOC)) {
1512 if (!found)
1513 *end = state->end;
1514 goto out;
1515 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001516 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001517 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001518 *cached_state = state;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +02001519 refcount_inc(&state->refs);
Josef Bacikc2a128d2010-02-02 21:19:11 +00001520 }
Chris Masond1310b22008-01-24 16:13:08 -05001521 found++;
1522 *end = state->end;
1523 cur_start = state->end + 1;
1524 node = rb_next(node);
Chris Masond1310b22008-01-24 16:13:08 -05001525 total_bytes += state->end - state->start + 1;
Josef Bacik7bf811a52013-10-07 22:11:09 -04001526 if (total_bytes >= max_bytes)
Josef Bacik573aeca2013-08-30 14:38:49 -04001527 break;
Josef Bacik573aeca2013-08-30 14:38:49 -04001528 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001529 break;
1530 }
1531out:
Chris Masoncad321a2008-12-17 14:51:42 -05001532 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001533 return found;
1534}
1535
Liu Boda2c7002017-02-10 16:41:05 +01001536static int __process_pages_contig(struct address_space *mapping,
1537 struct page *locked_page,
1538 pgoff_t start_index, pgoff_t end_index,
1539 unsigned long page_ops, pgoff_t *index_ret);
1540
Jeff Mahoney143bede2012-03-01 14:56:26 +01001541static noinline void __unlock_for_delalloc(struct inode *inode,
1542 struct page *locked_page,
1543 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001544{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001545 unsigned long index = start >> PAGE_SHIFT;
1546 unsigned long end_index = end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001547
Liu Bo76c00212017-02-10 16:42:14 +01001548 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001549 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001550 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001551
Liu Bo76c00212017-02-10 16:42:14 +01001552 __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1553 PAGE_UNLOCK, NULL);
Chris Masonc8b97812008-10-29 14:49:59 -04001554}
1555
1556static noinline int lock_delalloc_pages(struct inode *inode,
1557 struct page *locked_page,
1558 u64 delalloc_start,
1559 u64 delalloc_end)
1560{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001561 unsigned long index = delalloc_start >> PAGE_SHIFT;
Liu Bo76c00212017-02-10 16:42:14 +01001562 unsigned long index_ret = index;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001563 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001564 int ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001565
Liu Bo76c00212017-02-10 16:42:14 +01001566 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001567 if (index == locked_page->index && index == end_index)
1568 return 0;
1569
Liu Bo76c00212017-02-10 16:42:14 +01001570 ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1571 end_index, PAGE_LOCK, &index_ret);
1572 if (ret == -EAGAIN)
1573 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1574 (u64)index_ret << PAGE_SHIFT);
Chris Masonc8b97812008-10-29 14:49:59 -04001575 return ret;
1576}
1577
1578/*
1579 * find a contiguous range of bytes in the file marked as delalloc, not
1580 * more than 'max_bytes'. 'start' and 'end' are used to return the range.
1581 *
1582 * 1 is returned if we find something, 0 if nothing was in the tree
1583 */
Josef Bacik294e30f2013-10-09 12:00:56 -04001584STATIC u64 find_lock_delalloc_range(struct inode *inode,
1585 struct extent_io_tree *tree,
1586 struct page *locked_page, u64 *start,
1587 u64 *end, u64 max_bytes)
Chris Masonc8b97812008-10-29 14:49:59 -04001588{
1589 u64 delalloc_start;
1590 u64 delalloc_end;
1591 u64 found;
Chris Mason9655d292009-09-02 15:22:30 -04001592 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001593 int ret;
1594 int loops = 0;
1595
1596again:
1597 /* step one, find a bunch of delalloc bytes starting at start */
1598 delalloc_start = *start;
1599 delalloc_end = 0;
1600 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001601 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001602 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001603 *start = delalloc_start;
1604 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001605 free_extent_state(cached_state);
Liu Bo385fe0b2013-10-01 23:49:49 +08001606 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001607 }
1608
1609 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001610 * start comes from the offset of locked_page. We have to lock
1611 * pages in order, so we can't process delalloc bytes before
1612 * locked_page
1613 */
Chris Masond3977122009-01-05 21:25:51 -05001614 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001615 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001616
1617 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001618 * make sure to limit the number of pages we try to lock down
Chris Masonc8b97812008-10-29 14:49:59 -04001619 */
Josef Bacik7bf811a52013-10-07 22:11:09 -04001620 if (delalloc_end + 1 - delalloc_start > max_bytes)
1621 delalloc_end = delalloc_start + max_bytes - 1;
Chris Masond3977122009-01-05 21:25:51 -05001622
Chris Masonc8b97812008-10-29 14:49:59 -04001623 /* step two, lock all the pages after the page that has start */
1624 ret = lock_delalloc_pages(inode, locked_page,
1625 delalloc_start, delalloc_end);
1626 if (ret == -EAGAIN) {
1627 /* some of the pages are gone, let's avoid looping by
1628 * shortening the size of the delalloc range we're searching
1629 */
Chris Mason9655d292009-09-02 15:22:30 -04001630 free_extent_state(cached_state);
Chris Mason7d788742014-05-21 05:49:54 -07001631 cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001632 if (!loops) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001633 max_bytes = PAGE_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001634 loops = 1;
1635 goto again;
1636 } else {
1637 found = 0;
1638 goto out_failed;
1639 }
1640 }
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001641 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
Chris Masonc8b97812008-10-29 14:49:59 -04001642
1643 /* step three, lock the state bits for the whole range */
David Sterbaff13db42015-12-03 14:30:40 +01001644 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001645
1646 /* then test to make sure it is all still delalloc */
1647 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001648 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001649 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001650 unlock_extent_cached(tree, delalloc_start, delalloc_end,
David Sterbae43bbe52017-12-12 21:43:52 +01001651 &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001652 __unlock_for_delalloc(inode, locked_page,
1653 delalloc_start, delalloc_end);
1654 cond_resched();
1655 goto again;
1656 }
Chris Mason9655d292009-09-02 15:22:30 -04001657 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001658 *start = delalloc_start;
1659 *end = delalloc_end;
1660out_failed:
1661 return found;
1662}
1663
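/*
 * Sketch of the writepage-side caller loop (names and the byte cap are made
 * up): each found range comes back with its pages and extent bits locked,
 * and whatever runs the delalloc for that range is responsible for
 * unlocking them again.
 */
static inline void example_run_page_delalloc(struct inode *inode,
					     struct page *locked_page)
{
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	u64 start = page_offset(locked_page);
	u64 page_end = start + PAGE_SIZE - 1;
	u64 end = 0;

	while (start <= page_end &&
	       find_lock_delalloc_range(inode, tree, locked_page, &start,
					&end, 128ULL * 1024 * 1024)) {
		/* ... kick off COW/compression for [start, end] ... */
		start = end + 1;
	}
}
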
Liu Boda2c7002017-02-10 16:41:05 +01001664static int __process_pages_contig(struct address_space *mapping,
1665 struct page *locked_page,
1666 pgoff_t start_index, pgoff_t end_index,
1667 unsigned long page_ops, pgoff_t *index_ret)
Chris Masonc8b97812008-10-29 14:49:59 -04001668{
Liu Bo873695b2017-02-02 17:49:22 -08001669 unsigned long nr_pages = end_index - start_index + 1;
Liu Boda2c7002017-02-10 16:41:05 +01001670 unsigned long pages_locked = 0;
Liu Bo873695b2017-02-02 17:49:22 -08001671 pgoff_t index = start_index;
Chris Masonc8b97812008-10-29 14:49:59 -04001672 struct page *pages[16];
Liu Bo873695b2017-02-02 17:49:22 -08001673 unsigned ret;
Liu Boda2c7002017-02-10 16:41:05 +01001674 int err = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001675 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001676
Liu Boda2c7002017-02-10 16:41:05 +01001677 if (page_ops & PAGE_LOCK) {
1678 ASSERT(page_ops == PAGE_LOCK);
1679 ASSERT(index_ret && *index_ret == start_index);
1680 }
1681
Filipe Manana704de492014-10-06 22:14:22 +01001682 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
Liu Bo873695b2017-02-02 17:49:22 -08001683 mapping_set_error(mapping, -EIO);
Filipe Manana704de492014-10-06 22:14:22 +01001684
Chris Masond3977122009-01-05 21:25:51 -05001685 while (nr_pages > 0) {
Liu Bo873695b2017-02-02 17:49:22 -08001686 ret = find_get_pages_contig(mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001687 min_t(unsigned long,
1688 nr_pages, ARRAY_SIZE(pages)), pages);
Liu Boda2c7002017-02-10 16:41:05 +01001689 if (ret == 0) {
1690 /*
1691 * Only when we're locking pages (PAGE_LOCK)
1692 * can find_get_pages_contig() find nothing at @index.
1693 */
1694 ASSERT(page_ops & PAGE_LOCK);
Liu Bo49d4a332017-03-06 18:20:56 -08001695 err = -EAGAIN;
1696 goto out;
Liu Boda2c7002017-02-10 16:41:05 +01001697 }
Chris Mason8b62b722009-09-02 16:53:46 -04001698
Liu Boda2c7002017-02-10 16:41:05 +01001699 for (i = 0; i < ret; i++) {
Josef Bacikc2790a22013-07-29 11:20:47 -04001700 if (page_ops & PAGE_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001701 SetPagePrivate2(pages[i]);
1702
Chris Masonc8b97812008-10-29 14:49:59 -04001703 if (pages[i] == locked_page) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001704 put_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001705 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001706 continue;
1707 }
Josef Bacikc2790a22013-07-29 11:20:47 -04001708 if (page_ops & PAGE_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001709 clear_page_dirty_for_io(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001710 if (page_ops & PAGE_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001711 set_page_writeback(pages[i]);
Filipe Manana704de492014-10-06 22:14:22 +01001712 if (page_ops & PAGE_SET_ERROR)
1713 SetPageError(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001714 if (page_ops & PAGE_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001715 end_page_writeback(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001716 if (page_ops & PAGE_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001717 unlock_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001718 if (page_ops & PAGE_LOCK) {
1719 lock_page(pages[i]);
1720 if (!PageDirty(pages[i]) ||
1721 pages[i]->mapping != mapping) {
1722 unlock_page(pages[i]);
1723 put_page(pages[i]);
1724 err = -EAGAIN;
1725 goto out;
1726 }
1727 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001728 put_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001729 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001730 }
1731 nr_pages -= ret;
1732 index += ret;
1733 cond_resched();
1734 }
Liu Boda2c7002017-02-10 16:41:05 +01001735out:
1736 if (err && index_ret)
1737 *index_ret = start_index + pages_locked - 1;
1738 return err;
Chris Masonc8b97812008-10-29 14:49:59 -04001739}
Chris Masonc8b97812008-10-29 14:49:59 -04001740
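/*
 * Sketch of a hypothetical helper: because page_ops is a bitmask, one pass
 * over the range can clear dirty, tag writeback and unlock every page while
 * leaving locked_page itself untouched.
 */
static inline void example_prepare_pages_for_io(struct address_space *mapping,
						struct page *locked_page,
						u64 start, u64 end)
{
	__process_pages_contig(mapping, locked_page,
			       start >> PAGE_SHIFT, end >> PAGE_SHIFT,
			       PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
			       PAGE_UNLOCK, NULL);
}
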
Liu Bo873695b2017-02-02 17:49:22 -08001741void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1742 u64 delalloc_end, struct page *locked_page,
1743 unsigned clear_bits,
1744 unsigned long page_ops)
1745{
1746 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
David Sterbaae0f1622017-10-31 16:37:52 +01001747 NULL);
Liu Bo873695b2017-02-02 17:49:22 -08001748
1749 __process_pages_contig(inode->i_mapping, locked_page,
1750 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
Liu Boda2c7002017-02-10 16:41:05 +01001751 page_ops, NULL);
Liu Bo873695b2017-02-02 17:49:22 -08001752}
1753
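/*
 * Sketch of a hypothetical error path (the exact bit/op combination is made
 * up): dropping the delalloc-related bits and finishing all the pages takes
 * a single call.
 */
static inline void example_fail_delalloc_range(struct inode *inode,
					       struct page *locked_page,
					       u64 start, u64 end)
{
	extent_clear_unlock_delalloc(inode, start, end, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);
}
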
Chris Masond352ac62008-09-29 15:18:18 -04001754/*
1755 * count the number of bytes in the tree that have the given bit(s)
1756 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1757 * cached. The total number found is returned.
1758 */
Chris Masond1310b22008-01-24 16:13:08 -05001759u64 count_range_bits(struct extent_io_tree *tree,
1760 u64 *start, u64 search_end, u64 max_bytes,
David Sterba9ee49a042015-01-14 19:52:13 +01001761 unsigned bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05001762{
1763 struct rb_node *node;
1764 struct extent_state *state;
1765 u64 cur_start = *start;
1766 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05001767 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001768 int found = 0;
1769
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05301770 if (WARN_ON(search_end <= cur_start))
Chris Masond1310b22008-01-24 16:13:08 -05001771 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05001772
Chris Masoncad321a2008-12-17 14:51:42 -05001773 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001774 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1775 total_bytes = tree->dirty_bytes;
1776 goto out;
1777 }
1778 /*
1779 * this search will find all the extents that end after
1780 * our range starts.
1781 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001782 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001783 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001784 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001785
Chris Masond3977122009-01-05 21:25:51 -05001786 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001787 state = rb_entry(node, struct extent_state, rb_node);
1788 if (state->start > search_end)
1789 break;
Chris Masonec29ed52011-02-23 16:23:20 -05001790 if (contig && found && state->start > last + 1)
1791 break;
1792 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001793 total_bytes += min(search_end, state->end) + 1 -
1794 max(cur_start, state->start);
1795 if (total_bytes >= max_bytes)
1796 break;
1797 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04001798 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05001799 found = 1;
1800 }
Chris Masonec29ed52011-02-23 16:23:20 -05001801 last = state->end;
1802 } else if (contig && found) {
1803 break;
Chris Masond1310b22008-01-24 16:13:08 -05001804 }
1805 node = rb_next(node);
1806 if (!node)
1807 break;
1808 }
1809out:
Chris Masoncad321a2008-12-17 14:51:42 -05001810 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001811 return total_bytes;
1812}
Christoph Hellwigb2950862008-12-02 09:54:17 -05001813
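/*
 * Sketch of a hypothetical caller: with contig=1 the count stops at the
 * first gap instead of summing across it, and *offset is advanced to where
 * the run really starts.
 */
static inline u64 example_first_delalloc_run(struct extent_io_tree *tree,
					     u64 *offset)
{
	return count_range_bits(tree, offset, (u64)-1, (u64)-1,
				EXTENT_DELALLOC, 1);
}
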
Chris Masond352ac62008-09-29 15:18:18 -04001814/*
1815 * set the failrec field for a given byte offset in the tree. If there isn't
1816 * an extent_state starting at that exact offset, -ENOENT is returned instead.
1817 */
Arnd Bergmannf827ba92016-02-22 22:53:20 +01001818static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
David Sterba47dc1962016-02-11 13:24:13 +01001819 struct io_failure_record *failrec)
Chris Masond1310b22008-01-24 16:13:08 -05001820{
1821 struct rb_node *node;
1822 struct extent_state *state;
1823 int ret = 0;
1824
Chris Masoncad321a2008-12-17 14:51:42 -05001825 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001826 /*
1827 * this search will find all the extents that end after
1828 * our range starts.
1829 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001830 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001831 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001832 ret = -ENOENT;
1833 goto out;
1834 }
1835 state = rb_entry(node, struct extent_state, rb_node);
1836 if (state->start != start) {
1837 ret = -ENOENT;
1838 goto out;
1839 }
David Sterba47dc1962016-02-11 13:24:13 +01001840 state->failrec = failrec;
Chris Masond1310b22008-01-24 16:13:08 -05001841out:
Chris Masoncad321a2008-12-17 14:51:42 -05001842 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001843 return ret;
1844}
1845
Arnd Bergmannf827ba92016-02-22 22:53:20 +01001846static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
David Sterba47dc1962016-02-11 13:24:13 +01001847 struct io_failure_record **failrec)
Chris Masond1310b22008-01-24 16:13:08 -05001848{
1849 struct rb_node *node;
1850 struct extent_state *state;
1851 int ret = 0;
1852
Chris Masoncad321a2008-12-17 14:51:42 -05001853 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001854 /*
1855 * this search will find all the extents that end after
1856 * our range starts.
1857 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001858 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001859 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001860 ret = -ENOENT;
1861 goto out;
1862 }
1863 state = rb_entry(node, struct extent_state, rb_node);
1864 if (state->start != start) {
1865 ret = -ENOENT;
1866 goto out;
1867 }
David Sterba47dc1962016-02-11 13:24:13 +01001868 *failrec = state->failrec;
Chris Masond1310b22008-01-24 16:13:08 -05001869out:
Chris Masoncad321a2008-12-17 14:51:42 -05001870 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001871 return ret;
1872}
1873
1874/*
1875 * searches a range in the state tree for the given bits.
Chris Mason70dec802008-01-29 09:59:12 -05001876 * If 'filled' == 1, this returns 1 only if every extent in the range
Chris Masond1310b22008-01-24 16:13:08 -05001877 * has the bits set. Otherwise, 1 is returned if any bit in the
1878 * range is found set.
1879 */
1880int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba9ee49a042015-01-14 19:52:13 +01001881 unsigned bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05001882{
1883 struct extent_state *state = NULL;
1884 struct rb_node *node;
1885 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001886
Chris Masoncad321a2008-12-17 14:51:42 -05001887 spin_lock(&tree->lock);
Filipe Manana27a35072014-07-06 20:09:59 +01001888 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
Josef Bacikdf98b6e2011-06-20 14:53:48 -04001889 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04001890 node = &cached->rb_node;
1891 else
1892 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001893 while (node && start <= end) {
1894 state = rb_entry(node, struct extent_state, rb_node);
1895
1896 if (filled && state->start > start) {
1897 bitset = 0;
1898 break;
1899 }
1900
1901 if (state->start > end)
1902 break;
1903
1904 if (state->state & bits) {
1905 bitset = 1;
1906 if (!filled)
1907 break;
1908 } else if (filled) {
1909 bitset = 0;
1910 break;
1911 }
Chris Mason46562ce2009-09-23 20:23:16 -04001912
1913 if (state->end == (u64)-1)
1914 break;
1915
Chris Masond1310b22008-01-24 16:13:08 -05001916 start = state->end + 1;
1917 if (start > end)
1918 break;
1919 node = rb_next(node);
1920 if (!node) {
1921 if (filled)
1922 bitset = 0;
1923 break;
1924 }
1925 }
Chris Masoncad321a2008-12-17 14:51:42 -05001926 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001927 return bitset;
1928}
Chris Masond1310b22008-01-24 16:13:08 -05001929
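/*
 * Sketches of hypothetical callers contrasting the two 'filled' modes:
 * filled=1 asks "does every byte of the range carry the bit?", while
 * filled=0 asks "does any byte?".
 */
static inline int example_range_fully_uptodate(struct extent_io_tree *tree,
					       u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
}

static inline int example_range_has_delalloc(struct extent_io_tree *tree,
					     u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_DELALLOC, 0, NULL);
}
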
1930/*
1931 * helper function to set a given page up to date if all the
1932 * extents in the tree for that page are up to date
1933 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001934static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001935{
Miao Xie4eee4fa2012-12-21 09:17:45 +00001936 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001937 u64 end = start + PAGE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001938 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001939 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001940}
1941
Josef Bacik7870d082017-05-05 11:57:15 -04001942int free_io_failure(struct extent_io_tree *failure_tree,
1943 struct extent_io_tree *io_tree,
1944 struct io_failure_record *rec)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001945{
1946 int ret;
1947 int err = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001948
David Sterba47dc1962016-02-11 13:24:13 +01001949 set_state_failrec(failure_tree, rec->start, NULL);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001950 ret = clear_extent_bits(failure_tree, rec->start,
1951 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02001952 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001953 if (ret)
1954 err = ret;
1955
Josef Bacik7870d082017-05-05 11:57:15 -04001956 ret = clear_extent_bits(io_tree, rec->start,
David Woodhouse53b381b2013-01-29 18:40:14 -05001957 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02001958 EXTENT_DAMAGED);
David Woodhouse53b381b2013-01-29 18:40:14 -05001959 if (ret && !err)
1960 err = ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001961
1962 kfree(rec);
1963 return err;
1964}
1965
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001966/*
1967 * this bypasses the standard btrfs submit functions deliberately, as
1968 * the standard behavior is to write all copies in a raid setup. here we only
1969 * want to write the one bad copy. so we do the mapping for ourselves and issue
1970 * submit_bio directly.
Stefan Behrens3ec706c2012-11-05 15:46:42 +01001971 * to avoid any synchronization issues, wait for the data after writing, which
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001972 * actually prevents the read that triggered the error from finishing.
1973 * currently, there can be no more than two copies of every data bit. thus,
1974 * exactly one rewrite is required.
1975 */
Josef Bacik6ec656b2017-05-05 11:57:14 -04001976int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
1977 u64 length, u64 logical, struct page *page,
1978 unsigned int pg_offset, int mirror_num)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001979{
1980 struct bio *bio;
1981 struct btrfs_device *dev;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001982 u64 map_length = 0;
1983 u64 sector;
1984 struct btrfs_bio *bbio = NULL;
1985 int ret;
1986
Linus Torvalds1751e8a2017-11-27 13:05:09 -08001987 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001988 BUG_ON(!mirror_num);
1989
David Sterbac5e4c3d2017-06-12 17:29:41 +02001990 bio = btrfs_io_bio_alloc(1);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001991 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001992 map_length = length;
1993
Filipe Mananab5de8d02016-05-27 22:21:27 +01001994 /*
1995 * Avoid races with device replace and make sure our bbio has devices
1996 * associated to its stripes that don't go away while we are doing the
1997 * read repair operation.
1998 */
1999 btrfs_bio_counter_inc_blocked(fs_info);
Nikolay Borisove4ff5fb2017-07-19 10:48:42 +03002000 if (btrfs_is_parity_mirror(fs_info, logical, length)) {
Liu Boc7253282017-03-29 10:53:58 -07002001 /*
2002 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2003 * to update all raid stripes, but here we just want to correct
2004 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2005 * stripe's dev and sector.
2006 */
2007 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2008 &map_length, &bbio, 0);
2009 if (ret) {
2010 btrfs_bio_counter_dec(fs_info);
2011 bio_put(bio);
2012 return -EIO;
2013 }
2014 ASSERT(bbio->mirror_num == 1);
2015 } else {
2016 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2017 &map_length, &bbio, mirror_num);
2018 if (ret) {
2019 btrfs_bio_counter_dec(fs_info);
2020 bio_put(bio);
2021 return -EIO;
2022 }
2023 BUG_ON(mirror_num != bbio->mirror_num);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002024 }
Liu Boc7253282017-03-29 10:53:58 -07002025
2026 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002027 bio->bi_iter.bi_sector = sector;
Liu Boc7253282017-03-29 10:53:58 -07002028 dev = bbio->stripes[bbio->mirror_num - 1].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08002029 btrfs_put_bbio(bbio);
Anand Jainebbede42017-12-04 12:54:52 +08002030 if (!dev || !dev->bdev ||
2031 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
Filipe Mananab5de8d02016-05-27 22:21:27 +01002032 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002033 bio_put(bio);
2034 return -EIO;
2035 }
Christoph Hellwig74d46992017-08-23 19:10:32 +02002036 bio_set_dev(bio, dev->bdev);
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002037 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
Miao Xieffdd2012014-09-12 18:44:00 +08002038 bio_add_page(bio, page, length, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002039
Mike Christie4e49ea42016-06-05 14:31:41 -05002040 if (btrfsic_submit_bio_wait(bio)) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002041 /* try to remap that extent elsewhere? */
Filipe Mananab5de8d02016-05-27 22:21:27 +01002042 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002043 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002044 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002045 return -EIO;
2046 }
2047
David Sterbab14af3b2015-10-08 10:43:10 +02002048 btrfs_info_rl_in_rcu(fs_info,
2049 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
Josef Bacik6ec656b2017-05-05 11:57:14 -04002050 ino, start,
Miao Xie1203b682014-09-12 18:44:01 +08002051 rcu_str_deref(dev->name), sector);
Filipe Mananab5de8d02016-05-27 22:21:27 +01002052 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002053 bio_put(bio);
2054 return 0;
2055}
2056
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04002057int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
2058 struct extent_buffer *eb, int mirror_num)
Josef Bacikea466792012-03-26 21:57:36 -04002059{
Josef Bacikea466792012-03-26 21:57:36 -04002060 u64 start = eb->start;
2061 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond95603b2012-04-12 15:55:15 -04002062 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04002063
David Howellsbc98a422017-07-17 08:45:34 +01002064 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002065 return -EROFS;
2066
Josef Bacikea466792012-03-26 21:57:36 -04002067 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02002068 struct page *p = eb->pages[i];
Miao Xie1203b682014-09-12 18:44:01 +08002069
Josef Bacik6ec656b2017-05-05 11:57:14 -04002070 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
Miao Xie1203b682014-09-12 18:44:01 +08002071 start - page_offset(p), mirror_num);
Josef Bacikea466792012-03-26 21:57:36 -04002072 if (ret)
2073 break;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002074 start += PAGE_SIZE;
Josef Bacikea466792012-03-26 21:57:36 -04002075 }
2076
2077 return ret;
2078}
2079
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002080/*
2081 * each time an IO finishes, we do a fast check in the IO failure tree
2082 * to see if we need to process or clean up an io_failure_record
2083 */
Josef Bacik7870d082017-05-05 11:57:15 -04002084int clean_io_failure(struct btrfs_fs_info *fs_info,
2085 struct extent_io_tree *failure_tree,
2086 struct extent_io_tree *io_tree, u64 start,
2087 struct page *page, u64 ino, unsigned int pg_offset)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002088{
2089 u64 private;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002090 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002091 struct extent_state *state;
2092 int num_copies;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002093 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002094
2095 private = 0;
Josef Bacik7870d082017-05-05 11:57:15 -04002096 ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2097 EXTENT_DIRTY, 0);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002098 if (!ret)
2099 return 0;
2100
Josef Bacik7870d082017-05-05 11:57:15 -04002101 ret = get_state_failrec(failure_tree, start, &failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002102 if (ret)
2103 return 0;
2104
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002105 BUG_ON(!failrec->this_mirror);
2106
2107 if (failrec->in_validation) {
2108 /* there was no real error, just free the record */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002109 btrfs_debug(fs_info,
2110 "clean_io_failure: freeing dummy error at %llu",
2111 failrec->start);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002112 goto out;
2113 }
David Howellsbc98a422017-07-17 08:45:34 +01002114 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002115 goto out;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002116
Josef Bacik7870d082017-05-05 11:57:15 -04002117 spin_lock(&io_tree->lock);
2118 state = find_first_extent_bit_state(io_tree,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002119 failrec->start,
2120 EXTENT_LOCKED);
Josef Bacik7870d082017-05-05 11:57:15 -04002121 spin_unlock(&io_tree->lock);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002122
Miao Xie883d0de2013-07-25 19:22:35 +08002123 if (state && state->start <= failrec->start &&
2124 state->end >= failrec->start + failrec->len - 1) {
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002125 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2126 failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002127 if (num_copies > 1) {
Josef Bacik7870d082017-05-05 11:57:15 -04002128 repair_io_failure(fs_info, ino, start, failrec->len,
2129 failrec->logical, page, pg_offset,
2130 failrec->failed_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002131 }
2132 }
2133
2134out:
Josef Bacik7870d082017-05-05 11:57:15 -04002135 free_io_failure(failure_tree, io_tree, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002136
Miao Xie454ff3d2014-09-12 18:43:58 +08002137 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002138}
2139
Miao Xief6124962014-09-12 18:44:04 +08002140/*
2141 * Can be called when
2142 * - holding the extent lock
2143 * - under an ordered extent
2144 * - the inode is being freed
2145 */
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002146void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
Miao Xief6124962014-09-12 18:44:04 +08002147{
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002148 struct extent_io_tree *failure_tree = &inode->io_failure_tree;
Miao Xief6124962014-09-12 18:44:04 +08002149 struct io_failure_record *failrec;
2150 struct extent_state *state, *next;
2151
2152 if (RB_EMPTY_ROOT(&failure_tree->state))
2153 return;
2154
2155 spin_lock(&failure_tree->lock);
2156 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2157 while (state) {
2158 if (state->start > end)
2159 break;
2160
2161 ASSERT(state->end <= end);
2162
2163 next = next_state(state);
2164
David Sterba47dc1962016-02-11 13:24:13 +01002165 failrec = state->failrec;
Miao Xief6124962014-09-12 18:44:04 +08002166 free_extent_state(state);
2167 kfree(failrec);
2168
2169 state = next;
2170 }
2171 spin_unlock(&failure_tree->lock);
2172}
2173
Miao Xie2fe63032014-09-12 18:43:59 +08002174int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
David Sterba47dc1962016-02-11 13:24:13 +01002175 struct io_failure_record **failrec_ret)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002176{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002177 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002178 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002179 struct extent_map *em;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002180 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2181 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2182 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002183 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002184 u64 logical;
2185
David Sterba47dc1962016-02-11 13:24:13 +01002186 ret = get_state_failrec(failure_tree, start, &failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002187 if (ret) {
2188 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2189 if (!failrec)
2190 return -ENOMEM;
Miao Xie2fe63032014-09-12 18:43:59 +08002191
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002192 failrec->start = start;
2193 failrec->len = end - start + 1;
2194 failrec->this_mirror = 0;
2195 failrec->bio_flags = 0;
2196 failrec->in_validation = 0;
2197
2198 read_lock(&em_tree->lock);
2199 em = lookup_extent_mapping(em_tree, start, failrec->len);
2200 if (!em) {
2201 read_unlock(&em_tree->lock);
2202 kfree(failrec);
2203 return -EIO;
2204 }
2205
Filipe David Borba Manana68ba9902013-11-25 03:22:07 +00002206 if (em->start > start || em->start + em->len <= start) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002207 free_extent_map(em);
2208 em = NULL;
2209 }
2210 read_unlock(&em_tree->lock);
Tsutomu Itoh7a2d6a62012-10-01 03:07:15 -06002211 if (!em) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002212 kfree(failrec);
2213 return -EIO;
2214 }
Miao Xie2fe63032014-09-12 18:43:59 +08002215
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002216 logical = start - em->start;
2217 logical = em->block_start + logical;
2218 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2219 logical = em->block_start;
2220 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2221 extent_set_compress_type(&failrec->bio_flags,
2222 em->compress_type);
2223 }
Miao Xie2fe63032014-09-12 18:43:59 +08002224
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002225 btrfs_debug(fs_info,
2226 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2227 logical, start, failrec->len);
Miao Xie2fe63032014-09-12 18:43:59 +08002228
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002229 failrec->logical = logical;
2230 free_extent_map(em);
2231
2232 /* set the bits in the private failure tree */
2233 ret = set_extent_bits(failure_tree, start, end,
David Sterbaceeb0ae2016-04-26 23:54:39 +02002234 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002235 if (ret >= 0)
David Sterba47dc1962016-02-11 13:24:13 +01002236 ret = set_state_failrec(failure_tree, start, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002237 /* set the bits in the inode's tree */
2238 if (ret >= 0)
David Sterbaceeb0ae2016-04-26 23:54:39 +02002239 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002240 if (ret < 0) {
2241 kfree(failrec);
2242 return ret;
2243 }
2244 } else {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002245 btrfs_debug(fs_info,
2246 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2247 failrec->logical, failrec->start, failrec->len,
2248 failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002249 /*
2250 * when data can be on disk more than twice, add to failrec here
2251 * (e.g. with a list for failed_mirror) to make
2252 * clean_io_failure() clean all those errors at once.
2253 */
2254 }
Miao Xie2fe63032014-09-12 18:43:59 +08002255
2256 *failrec_ret = failrec;
2257
2258 return 0;
2259}
2260
Ming Leia0b60d72017-12-18 20:22:11 +08002261bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
Miao Xie2fe63032014-09-12 18:43:59 +08002262 struct io_failure_record *failrec, int failed_mirror)
2263{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002264 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002265 int num_copies;
2266
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002267 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002268 if (num_copies == 1) {
2269 /*
2270 * we only have a single copy of the data, so don't bother with
2271 * all the retry and error correction code that follows. no
2272 * matter what the error is, it is very likely to persist.
2273 */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002274 btrfs_debug(fs_info,
2275 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2276 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002277 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002278 }
2279
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002280 /*
2281 * there are two premises:
2282 * a) deliver good data to the caller
2283 * b) correct the bad sectors on disk
2284 */
Ming Leia0b60d72017-12-18 20:22:11 +08002285 if (failed_bio_pages > 1) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002286 /*
2287 * to fulfill b), we need to know the exact failing sectors, as
2288 * we don't want to rewrite any more than the failed ones. thus,
2289 * we need separate read requests for the failed bio
2290 *
2291 * if the following BUG_ON triggers, our validation request got
2292 * merged. we need separate requests for our algorithm to work.
2293 */
2294 BUG_ON(failrec->in_validation);
2295 failrec->in_validation = 1;
2296 failrec->this_mirror = failed_mirror;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002297 } else {
2298 /*
2299 * we're ready to fulfill a) and b) alongside. get a good copy
2300 * of the failed sector and if we succeed, we have setup
2301 * everything for repair_io_failure to do the rest for us.
2302 */
2303 if (failrec->in_validation) {
2304 BUG_ON(failrec->this_mirror != failed_mirror);
2305 failrec->in_validation = 0;
2306 failrec->this_mirror = 0;
2307 }
2308 failrec->failed_mirror = failed_mirror;
2309 failrec->this_mirror++;
2310 if (failrec->this_mirror == failed_mirror)
2311 failrec->this_mirror++;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002312 }
2313
Miao Xiefacc8a222013-07-25 19:22:34 +08002314 if (failrec->this_mirror > num_copies) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002315 btrfs_debug(fs_info,
2316 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2317 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002318 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002319 }
2320
Liu Boc3cfb652017-07-13 15:00:50 -07002321 return true;
Miao Xie2fe63032014-09-12 18:43:59 +08002322}
2323
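/*
 * Worked example of the single-sector mirror stepping above (illustrative
 * numbers): with num_copies = 3 and a bad read from mirror 1, the first
 * pass bumps this_mirror from 0 to 1, sees it equal failed_mirror and bumps
 * it again, so mirror 2 is read.  If that fails too, the next pass comes in
 * with failed_mirror = 2 and steps this_mirror to 3, the last copy.  Once
 * this_mirror would go past num_copies, the record is declared
 * unrepairable.
 */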
2324
2325struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2326 struct io_failure_record *failrec,
2327 struct page *page, int pg_offset, int icsum,
Miao Xie8b110e32014-09-12 18:44:03 +08002328 bio_end_io_t *endio_func, void *data)
Miao Xie2fe63032014-09-12 18:43:59 +08002329{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002330 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002331 struct bio *bio;
2332 struct btrfs_io_bio *btrfs_failed_bio;
2333 struct btrfs_io_bio *btrfs_bio;
2334
David Sterbac5e4c3d2017-06-12 17:29:41 +02002335 bio = btrfs_io_bio_alloc(1);
Miao Xie2fe63032014-09-12 18:43:59 +08002336 bio->bi_end_io = endio_func;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002337 bio->bi_iter.bi_sector = failrec->logical >> 9;
Christoph Hellwig74d46992017-08-23 19:10:32 +02002338 bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002339 bio->bi_iter.bi_size = 0;
Miao Xie8b110e32014-09-12 18:44:03 +08002340 bio->bi_private = data;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002341
Miao Xiefacc8a222013-07-25 19:22:34 +08002342 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2343 if (btrfs_failed_bio->csum) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002344 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2345
2346 btrfs_bio = btrfs_io_bio(bio);
2347 btrfs_bio->csum = btrfs_bio->csum_inline;
Miao Xie2fe63032014-09-12 18:43:59 +08002348 icsum *= csum_size;
2349 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
Miao Xiefacc8a222013-07-25 19:22:34 +08002350 csum_size);
2351 }
2352
Miao Xie2fe63032014-09-12 18:43:59 +08002353 bio_add_page(bio, page, failrec->len, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002354
Miao Xie2fe63032014-09-12 18:43:59 +08002355 return bio;
2356}
2357
2358/*
2359 * this is a generic handler for readpage errors (default
2360 * readpage_io_failed_hook). if other copies exist, read those and write back
2361 * good data to the failed position. does not investigate remapping the
2362 * failed extent elsewhere, hoping the device will be smart enough to do this as
2363 * needed
2364 */
2365
2366static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2367 struct page *page, u64 start, u64 end,
2368 int failed_mirror)
2369{
2370 struct io_failure_record *failrec;
2371 struct inode *inode = page->mapping->host;
2372 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002373 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
Miao Xie2fe63032014-09-12 18:43:59 +08002374 struct bio *bio;
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002375 int read_mode = 0;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002376 blk_status_t status;
Miao Xie2fe63032014-09-12 18:43:59 +08002377 int ret;
Ming Leia0b60d72017-12-18 20:22:11 +08002378 unsigned failed_bio_pages = bio_pages_all(failed_bio);
Miao Xie2fe63032014-09-12 18:43:59 +08002379
Mike Christie1f7ad752016-06-05 14:31:51 -05002380 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
Miao Xie2fe63032014-09-12 18:43:59 +08002381
2382 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2383 if (ret)
2384 return ret;
2385
Ming Leia0b60d72017-12-18 20:22:11 +08002386 if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
Liu Boc3cfb652017-07-13 15:00:50 -07002387 failed_mirror)) {
Josef Bacik7870d082017-05-05 11:57:15 -04002388 free_io_failure(failure_tree, tree, failrec);
Miao Xie2fe63032014-09-12 18:43:59 +08002389 return -EIO;
2390 }
2391
Ming Leia0b60d72017-12-18 20:22:11 +08002392 if (failed_bio_pages > 1)
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002393 read_mode |= REQ_FAILFAST_DEV;
Miao Xie2fe63032014-09-12 18:43:59 +08002394
2395 phy_offset >>= inode->i_sb->s_blocksize_bits;
2396 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2397 start - page_offset(page),
Miao Xie8b110e32014-09-12 18:44:03 +08002398 (int)phy_offset, failed_bio->bi_end_io,
2399 NULL);
Mike Christie1f7ad752016-06-05 14:31:51 -05002400 bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
Miao Xie2fe63032014-09-12 18:43:59 +08002401
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002402 btrfs_debug(btrfs_sb(inode->i_sb),
2403 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2404 read_mode, failrec->this_mirror, failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002405
Linus Torvalds8c27cb32017-07-05 16:41:23 -07002406 status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002407 failrec->bio_flags, 0);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002408 if (status) {
Josef Bacik7870d082017-05-05 11:57:15 -04002409 free_io_failure(failure_tree, tree, failrec);
Miao Xie6c387ab2014-09-12 18:43:57 +08002410 bio_put(bio);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002411 ret = blk_status_to_errno(status);
Miao Xie6c387ab2014-09-12 18:43:57 +08002412 }
2413
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002414 return ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002415}
2416
Chris Masond1310b22008-01-24 16:13:08 -05002417/* lots and lots of room for performance fixes in the end_bio funcs */
2418
David Sterbab5227c02015-12-03 13:08:59 +01002419void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
Jeff Mahoney87826df2012-02-15 16:23:57 +01002420{
2421 int uptodate = (err == 0);
2422 struct extent_io_tree *tree;
Eric Sandeen3e2426b2014-06-12 00:39:58 -05002423 int ret = 0;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002424
2425 tree = &BTRFS_I(page->mapping->host)->io_tree;
2426
David Sterbac3988d62017-02-17 15:18:32 +01002427 if (tree->ops && tree->ops->writepage_end_io_hook)
2428 tree->ops->writepage_end_io_hook(page, start, end, NULL,
2429 uptodate);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002430
Jeff Mahoney87826df2012-02-15 16:23:57 +01002431 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002432 ClearPageUptodate(page);
2433 SetPageError(page);
Colin Ian Kingbff5baf2017-05-09 18:14:01 +01002434 ret = err < 0 ? err : -EIO;
Liu Bo5dca6ee2014-05-12 12:47:36 +08002435 mapping_set_error(page->mapping, ret);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002436 }
Jeff Mahoney87826df2012-02-15 16:23:57 +01002437}
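
/*
 * Note: besides being called from the write endio path below, this is also
 * invoked directly by __extent_writepage() when a page finishes with
 * PageError set, so it runs outside bio completion context as well.
 */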
2438
Chris Masond1310b22008-01-24 16:13:08 -05002439/*
2440 * after a writepage IO is done, we need to:
2441 * clear the uptodate bits on error
2442 * clear the writeback bits in the extent tree for this IO
2443 * end_page_writeback if the page has no more pending IO
2444 *
2445 * Scheduling is not allowed, so the extent state tree is expected
2446 * to have one and only one object corresponding to this IO.
2447 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002448static void end_bio_extent_writepage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002449{
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002450 int error = blk_status_to_errno(bio->bi_status);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002451 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002452 u64 start;
2453 u64 end;
Kent Overstreet2c30c712013-11-07 12:20:26 -08002454 int i;
Chris Masond1310b22008-01-24 16:13:08 -05002455
David Sterbac09abff2017-07-13 18:10:07 +02002456 ASSERT(!bio_flagged(bio, BIO_CLONED));
Kent Overstreet2c30c712013-11-07 12:20:26 -08002457 bio_for_each_segment_all(bvec, bio, i) {
Chris Masond1310b22008-01-24 16:13:08 -05002458 struct page *page = bvec->bv_page;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002459 struct inode *inode = page->mapping->host;
2460 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
David Woodhouse902b22f2008-08-20 08:51:49 -04002461
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002462 /* We always issue full-page reads, but if some block
2463 * in a page fails to read, blk_update_request() will
2464 * advance bv_offset and adjust bv_len to compensate.
2465 * Print an error if they don't add up to a full
2466 * page, and an info message for nonzero offsets that do. */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002467 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2468 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002469 btrfs_err(fs_info,
Frank Holtonefe120a2013-12-20 11:37:06 -05002470 "partial page write in btrfs with offset %u and length %u",
2471 bvec->bv_offset, bvec->bv_len);
2472 else
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002473 btrfs_info(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04002474 "incomplete page write in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002475 bvec->bv_offset, bvec->bv_len);
2476 }
Chris Masond1310b22008-01-24 16:13:08 -05002477
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002478 start = page_offset(page);
2479 end = start + bvec->bv_offset + bvec->bv_len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002480
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002481 end_extent_writepage(page, error, start, end);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002482 end_page_writeback(page);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002483 }
Chris Mason2b1f55b2008-09-24 11:48:04 -04002484
Chris Masond1310b22008-01-24 16:13:08 -05002485 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002486}
2487
Miao Xie883d0de2013-07-25 19:22:35 +08002488static void
2489endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2490 int uptodate)
2491{
2492 struct extent_state *cached = NULL;
2493 u64 end = start + len - 1;
2494
2495 if (uptodate && tree->track_uptodate)
2496 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
David Sterbad810a4b2017-12-07 18:52:54 +01002497 unlock_extent_cached_atomic(tree, start, end, &cached);
Miao Xie883d0de2013-07-25 19:22:35 +08002498}
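
/*
 * Illustrative note: end_bio_extent_readpage() below batches calls to this
 * helper. With 4K pages, two good reads covering [0, 4K - 1] and
 * [4K, 8K - 1] are coalesced into one release of [0, 8K - 1], while a
 * failed page in between flushes the accumulated range and starts a fresh
 * one.
 */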
2499
Chris Masond1310b22008-01-24 16:13:08 -05002500/*
2501 * after a readpage IO is done, we need to:
2502 * clear the uptodate bits on error
2503 * set the uptodate bits if things worked
2504 * set the page up to date if all extents in the tree are uptodate
2505 * clear the lock bit in the extent tree
2506 * unlock the page if there are no other extents locked for it
2507 *
2508 * Scheduling is not allowed, so the extent state tree is expected
2509 * to have one and only one object corresponding to this IO.
2510 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002511static void end_bio_extent_readpage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002512{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002513 struct bio_vec *bvec;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002514 int uptodate = !bio->bi_status;
Miao Xiefacc8a222013-07-25 19:22:34 +08002515 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
Josef Bacik7870d082017-05-05 11:57:15 -04002516 struct extent_io_tree *tree, *failure_tree;
Miao Xiefacc8a222013-07-25 19:22:34 +08002517 u64 offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002518 u64 start;
2519 u64 end;
Miao Xiefacc8a222013-07-25 19:22:34 +08002520 u64 len;
Miao Xie883d0de2013-07-25 19:22:35 +08002521 u64 extent_start = 0;
2522 u64 extent_len = 0;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002523 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002524 int ret;
Kent Overstreet2c30c712013-11-07 12:20:26 -08002525 int i;
Chris Masond1310b22008-01-24 16:13:08 -05002526
David Sterbac09abff2017-07-13 18:10:07 +02002527 ASSERT(!bio_flagged(bio, BIO_CLONED));
Kent Overstreet2c30c712013-11-07 12:20:26 -08002528 bio_for_each_segment_all(bvec, bio, i) {
Chris Masond1310b22008-01-24 16:13:08 -05002529 struct page *page = bvec->bv_page;
Josef Bacika71754f2013-06-17 17:14:39 -04002530 struct inode *inode = page->mapping->host;
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002531 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Arne Jansen507903b2011-04-06 10:02:20 +00002532
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002533 btrfs_debug(fs_info,
2534 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002535 (u64)bio->bi_iter.bi_sector, bio->bi_status,
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002536 io_bio->mirror_num);
Josef Bacika71754f2013-06-17 17:14:39 -04002537 tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002538 failure_tree = &BTRFS_I(inode)->io_failure_tree;
David Woodhouse902b22f2008-08-20 08:51:49 -04002539
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002540 /* We always issue full-page reads, but if some block
2541 * in a page fails to read, blk_update_request() will
2542 * advance bv_offset and adjust bv_len to compensate.
2543 * Print an error if they don't add up to a full
2544 * page, and an info message for nonzero offsets that do. */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002545 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2546 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002547 btrfs_err(fs_info,
2548 "partial page read in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002549 bvec->bv_offset, bvec->bv_len);
2550 else
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002551 btrfs_info(fs_info,
2552 "incomplete page read in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002553 bvec->bv_offset, bvec->bv_len);
2554 }
Chris Masond1310b22008-01-24 16:13:08 -05002555
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002556 start = page_offset(page);
2557 end = start + bvec->bv_offset + bvec->bv_len - 1;
Miao Xiefacc8a222013-07-25 19:22:34 +08002558 len = bvec->bv_len;
Chris Masond1310b22008-01-24 16:13:08 -05002559
Chris Mason9be33952013-05-17 18:30:14 -04002560 mirror = io_bio->mirror_num;
David Sterba20c98012017-02-17 15:59:35 +01002561 if (likely(uptodate && tree->ops)) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002562 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2563 page, start, end,
2564 mirror);
Stefan Behrens5ee08442012-08-27 08:30:03 -06002565 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002566 uptodate = 0;
Stefan Behrens5ee08442012-08-27 08:30:03 -06002567 else
Josef Bacik7870d082017-05-05 11:57:15 -04002568 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2569 failure_tree, tree, start,
2570 page,
2571 btrfs_ino(BTRFS_I(inode)), 0);
Chris Masond1310b22008-01-24 16:13:08 -05002572 }
Josef Bacikea466792012-03-26 21:57:36 -04002573
Miao Xief2a09da2013-07-25 19:22:33 +08002574 if (likely(uptodate))
2575 goto readpage_ok;
2576
David Sterba20a7db82017-02-17 16:24:29 +01002577 if (tree->ops) {
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002578 ret = tree->ops->readpage_io_failed_hook(page, mirror);
Liu Bo9d0d1c82017-03-24 15:04:50 -07002579 if (ret == -EAGAIN) {
2580 /*
2581 * Data inode's readpage_io_failed_hook() always
2582 * returns -EAGAIN.
2583 *
2584 * The generic bio_readpage_error handles errors
2585 * the following way: If possible, new read
2586 * requests are created and submitted and will
2587 * end up in end_bio_extent_readpage as well (if
2588 * we're lucky, not in the !uptodate case). In
2589 * that case it returns 0 and we just go on with
2590 * the next page in our bio. If it can't handle
2591 * the error it will return -EIO and we remain
2592 * responsible for that page.
2593 */
2594 ret = bio_readpage_error(bio, offset, page,
2595 start, end, mirror);
2596 if (ret == 0) {
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002597 uptodate = !bio->bi_status;
Liu Bo9d0d1c82017-03-24 15:04:50 -07002598 offset += len;
2599 continue;
2600 }
Chris Mason7e383262008-04-09 16:28:12 -04002601 }
Liu Bo9d0d1c82017-03-24 15:04:50 -07002602
2603 /*
2604 * metadata's readpage_io_failed_hook() always returns
2605 * -EIO and fixes nothing. -EIO is also returned if
2606 * data inode error could not be fixed.
2607 */
2608 ASSERT(ret == -EIO);
Chris Mason7e383262008-04-09 16:28:12 -04002609 }
Miao Xief2a09da2013-07-25 19:22:33 +08002610readpage_ok:
Miao Xie883d0de2013-07-25 19:22:35 +08002611 if (likely(uptodate)) {
Josef Bacika71754f2013-06-17 17:14:39 -04002612 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002613 pgoff_t end_index = i_size >> PAGE_SHIFT;
Liu Boa583c022014-08-19 23:32:22 +08002614 unsigned off;
Josef Bacika71754f2013-06-17 17:14:39 -04002615
2616 /* Zero out the end if this page straddles i_size */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002617 off = i_size & (PAGE_SIZE-1);
Liu Boa583c022014-08-19 23:32:22 +08002618 if (page->index == end_index && off)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002619 zero_user_segment(page, off, PAGE_SIZE);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002620 SetPageUptodate(page);
Chris Mason70dec802008-01-29 09:59:12 -05002621 } else {
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002622 ClearPageUptodate(page);
2623 SetPageError(page);
Chris Mason70dec802008-01-29 09:59:12 -05002624 }
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002625 unlock_page(page);
Miao Xiefacc8a222013-07-25 19:22:34 +08002626 offset += len;
Miao Xie883d0de2013-07-25 19:22:35 +08002627
2628 if (unlikely(!uptodate)) {
2629 if (extent_len) {
2630 endio_readpage_release_extent(tree,
2631 extent_start,
2632 extent_len, 1);
2633 extent_start = 0;
2634 extent_len = 0;
2635 }
2636 endio_readpage_release_extent(tree, start,
2637 end - start + 1, 0);
2638 } else if (!extent_len) {
2639 extent_start = start;
2640 extent_len = end + 1 - start;
2641 } else if (extent_start + extent_len == start) {
2642 extent_len += end + 1 - start;
2643 } else {
2644 endio_readpage_release_extent(tree, extent_start,
2645 extent_len, uptodate);
2646 extent_start = start;
2647 extent_len = end + 1 - start;
2648 }
Kent Overstreet2c30c712013-11-07 12:20:26 -08002649 }
Chris Masond1310b22008-01-24 16:13:08 -05002650
Miao Xie883d0de2013-07-25 19:22:35 +08002651 if (extent_len)
2652 endio_readpage_release_extent(tree, extent_start, extent_len,
2653 uptodate);
Miao Xiefacc8a222013-07-25 19:22:34 +08002654 if (io_bio->end_io)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002655 io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
Chris Masond1310b22008-01-24 16:13:08 -05002656 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002657}
2658
Chris Mason9be33952013-05-17 18:30:14 -04002659/*
David Sterba184f9992017-06-12 17:29:39 +02002660 * Initialize the members up to but not including 'bio'. Use after allocating a
2661 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
2662 * 'bio' because use of __GFP_ZERO is not supported.
Chris Mason9be33952013-05-17 18:30:14 -04002663 */
David Sterba184f9992017-06-12 17:29:39 +02002664static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
Chris Masond1310b22008-01-24 16:13:08 -05002665{
David Sterba184f9992017-06-12 17:29:39 +02002666 memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2667}
2668
2669/*
David Sterba6e707bc2017-06-02 17:26:26 +02002670 * The following helpers allocate a bio. As it's backed by a bioset, it'll
2671 * never fail. We're returning a bio right now but you can call btrfs_io_bio
2672 * for the appropriate container_of magic
Chris Masond1310b22008-01-24 16:13:08 -05002673 */
David Sterbac821e7f32017-06-02 18:35:36 +02002674struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
Chris Masond1310b22008-01-24 16:13:08 -05002675{
2676 struct bio *bio;
2677
David Sterba9f2179a2017-06-02 17:55:44 +02002678 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset);
Christoph Hellwig74d46992017-08-23 19:10:32 +02002679 bio_set_dev(bio, bdev);
David Sterbac821e7f32017-06-02 18:35:36 +02002680 bio->bi_iter.bi_sector = first_byte >> 9;
David Sterba184f9992017-06-12 17:29:39 +02002681 btrfs_io_bio_init(btrfs_io_bio(bio));
Chris Masond1310b22008-01-24 16:13:08 -05002682 return bio;
2683}
2684
David Sterba8b6c1d52017-06-02 17:48:13 +02002685struct bio *btrfs_bio_clone(struct bio *bio)
Chris Mason9be33952013-05-17 18:30:14 -04002686{
Miao Xie23ea8e52014-09-12 18:43:54 +08002687 struct btrfs_io_bio *btrfs_bio;
2688 struct bio *new;
Chris Mason9be33952013-05-17 18:30:14 -04002689
David Sterba6e707bc2017-06-02 17:26:26 +02002690 /* Bio allocation backed by a bioset does not fail */
David Sterba8b6c1d52017-06-02 17:48:13 +02002691 new = bio_clone_fast(bio, GFP_NOFS, btrfs_bioset);
David Sterba6e707bc2017-06-02 17:26:26 +02002692 btrfs_bio = btrfs_io_bio(new);
David Sterba184f9992017-06-12 17:29:39 +02002693 btrfs_io_bio_init(btrfs_bio);
David Sterba6e707bc2017-06-02 17:26:26 +02002694 btrfs_bio->iter = bio->bi_iter;
Miao Xie23ea8e52014-09-12 18:43:54 +08002695 return new;
2696}
Chris Mason9be33952013-05-17 18:30:14 -04002697
David Sterbac5e4c3d2017-06-12 17:29:41 +02002698struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
Chris Mason9be33952013-05-17 18:30:14 -04002699{
Miao Xiefacc8a222013-07-25 19:22:34 +08002700 struct bio *bio;
2701
David Sterba6e707bc2017-06-02 17:26:26 +02002702 /* Bio allocation backed by a bioset does not fail */
David Sterbac5e4c3d2017-06-12 17:29:41 +02002703 bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, btrfs_bioset);
David Sterba184f9992017-06-12 17:29:39 +02002704 btrfs_io_bio_init(btrfs_io_bio(bio));
Miao Xiefacc8a222013-07-25 19:22:34 +08002705 return bio;
Chris Mason9be33952013-05-17 18:30:14 -04002706}
2707
Liu Boe4770942017-05-16 10:57:14 -07002708struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
Liu Bo2f8e9142017-05-15 17:43:31 -07002709{
2710 struct bio *bio;
2711 struct btrfs_io_bio *btrfs_bio;
2712
2713 /* this will never fail when it's backed by a bioset */
Liu Boe4770942017-05-16 10:57:14 -07002714 bio = bio_clone_fast(orig, GFP_NOFS, btrfs_bioset);
Liu Bo2f8e9142017-05-15 17:43:31 -07002715 ASSERT(bio);
2716
2717 btrfs_bio = btrfs_io_bio(bio);
David Sterba184f9992017-06-12 17:29:39 +02002718 btrfs_io_bio_init(btrfs_bio);
Liu Bo2f8e9142017-05-15 17:43:31 -07002719
2720 bio_trim(bio, offset >> 9, size >> 9);
Liu Bo17347ce2017-05-15 15:33:27 -07002721 btrfs_bio->iter = bio->bi_iter;
Liu Bo2f8e9142017-05-15 17:43:31 -07002722 return bio;
2723}
Chris Mason9be33952013-05-17 18:30:14 -04002724
Mike Christie1f7ad752016-06-05 14:31:51 -05002725static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
2726 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002727{
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002728 blk_status_t ret = 0;
Ming Leic45a8f22017-12-18 20:22:05 +08002729 struct bio_vec *bvec = bio_last_bvec_all(bio);
Chris Mason70dec802008-01-29 09:59:12 -05002730 struct page *page = bvec->bv_page;
2731 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05002732 u64 start;
Chris Mason70dec802008-01-29 09:59:12 -05002733
Miao Xie4eee4fa2012-12-21 09:17:45 +00002734 start = page_offset(page) + bvec->bv_offset;
Chris Mason70dec802008-01-29 09:59:12 -05002735
David Woodhouse902b22f2008-08-20 08:51:49 -04002736 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002737
David Sterba20c98012017-02-17 15:59:35 +01002738 if (tree->ops)
Josef Bacikc6100a42017-05-05 11:57:13 -04002739 ret = tree->ops->submit_bio_hook(tree->private_data, bio,
Chris Masoneaf25d92010-05-25 09:48:28 -04002740 mirror_num, bio_flags, start);
Chris Mason0b86a832008-03-24 15:01:56 -04002741 else
Mike Christie4e49ea42016-06-05 14:31:41 -05002742 btrfsic_submit_bio(bio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002743
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002744 return blk_status_to_errno(ret);
Chris Masond1310b22008-01-24 16:13:08 -05002745}
2746
Mike Christie1f7ad752016-06-05 14:31:51 -05002747static int merge_bio(struct extent_io_tree *tree, struct page *page,
Jeff Mahoney3444a972011-10-03 23:23:13 -04002748 unsigned long offset, size_t size, struct bio *bio,
2749 unsigned long bio_flags)
2750{
2751 int ret = 0;
David Sterba20c98012017-02-17 15:59:35 +01002752 if (tree->ops)
Mike Christie81a75f672016-06-05 14:31:54 -05002753 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
Jeff Mahoney3444a972011-10-03 23:23:13 -04002754 bio_flags);
Jeff Mahoney3444a972011-10-03 23:23:13 -04002755 return ret;
2756
2757}
2758
David Sterba4b81ba42017-06-06 19:14:26 +02002759/*
2760 * @opf: bio REQ_OP_* and REQ_* flags as one value
David Sterba5c2b1fd2017-06-06 19:22:55 +02002761 * @bio_ret: must be a valid pointer; the newly allocated bio is stored there
David Sterba4b81ba42017-06-06 19:14:26 +02002762 */
2763static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
Chris Masonda2f0f72015-07-02 13:57:22 -07002764 struct writeback_control *wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02002765 struct page *page, u64 offset,
David Sterba6c5a4e22017-10-04 17:10:34 +02002766 size_t size, unsigned long pg_offset,
Chris Masond1310b22008-01-24 16:13:08 -05002767 struct block_device *bdev,
2768 struct bio **bio_ret,
Chris Masonf1885912008-04-09 16:28:12 -04002769 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04002770 int mirror_num,
2771 unsigned long prev_bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01002772 unsigned long bio_flags,
2773 bool force_bio_submit)
Chris Masond1310b22008-01-24 16:13:08 -05002774{
2775 int ret = 0;
2776 struct bio *bio;
Chris Masonc8b97812008-10-29 14:49:59 -04002777 int contig = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04002778 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002779 size_t page_size = min_t(size_t, size, PAGE_SIZE);
David Sterba6273b7f2017-10-04 17:30:11 +02002780 sector_t sector = offset >> 9;
Chris Masond1310b22008-01-24 16:13:08 -05002781
David Sterba5c2b1fd2017-06-06 19:22:55 +02002782 ASSERT(bio_ret);
2783
2784 if (*bio_ret) {
Chris Masond1310b22008-01-24 16:13:08 -05002785 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04002786 if (old_compressed)
Kent Overstreet4f024f32013-10-11 15:44:27 -07002787 contig = bio->bi_iter.bi_sector == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002788 else
Kent Overstreetf73a1c72012-09-25 15:05:12 -07002789 contig = bio_end_sector(bio) == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002790
2791 if (prev_bio_flags != bio_flags || !contig ||
Filipe Manana005efed2015-09-14 09:09:31 +01002792 force_bio_submit ||
David Sterba6c5a4e22017-10-04 17:10:34 +02002793 merge_bio(tree, page, pg_offset, page_size, bio, bio_flags) ||
2794 bio_add_page(bio, page, page_size, pg_offset) < page_size) {
Mike Christie1f7ad752016-06-05 14:31:51 -05002795 ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
Naohiro Aota289454a2015-01-06 01:01:03 +09002796 if (ret < 0) {
2797 *bio_ret = NULL;
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002798 return ret;
Naohiro Aota289454a2015-01-06 01:01:03 +09002799 }
Chris Masond1310b22008-01-24 16:13:08 -05002800 bio = NULL;
2801 } else {
Chris Masonda2f0f72015-07-02 13:57:22 -07002802 if (wbc)
2803 wbc_account_io(wbc, page, page_size);
Chris Masond1310b22008-01-24 16:13:08 -05002804 return 0;
2805 }
2806 }
Chris Masonc8b97812008-10-29 14:49:59 -04002807
David Sterba6273b7f2017-10-04 17:30:11 +02002808 bio = btrfs_bio_alloc(bdev, offset);
David Sterba6c5a4e22017-10-04 17:10:34 +02002809 bio_add_page(bio, page, page_size, pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05002810 bio->bi_end_io = end_io_func;
2811 bio->bi_private = tree;
Jens Axboee6959b92017-06-27 11:51:28 -06002812 bio->bi_write_hint = page->mapping->host->i_write_hint;
David Sterba4b81ba42017-06-06 19:14:26 +02002813 bio->bi_opf = opf;
Chris Masonda2f0f72015-07-02 13:57:22 -07002814 if (wbc) {
2815 wbc_init_bio(wbc, bio);
2816 wbc_account_io(wbc, page, page_size);
2817 }
Chris Mason70dec802008-01-29 09:59:12 -05002818
David Sterba5c2b1fd2017-06-06 19:22:55 +02002819 *bio_ret = bio;
Chris Masond1310b22008-01-24 16:13:08 -05002820
2821 return ret;
2822}
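
/*
 * Worked example for the merge logic above (illustrative offsets): two 4K
 * reads that are physically contiguous and share bio_flags are both added
 * to *bio_ret via bio_add_page(); a third read that is discontiguous,
 * differently compressed or has force_bio_submit set causes the
 * accumulated bio to be submitted first and a new bio to be allocated
 * for it.
 */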
2823
Eric Sandeen48a3b632013-04-25 20:41:01 +00002824static void attach_extent_buffer_page(struct extent_buffer *eb,
2825 struct page *page)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05002826{
2827 if (!PagePrivate(page)) {
2828 SetPagePrivate(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002829 get_page(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05002830 set_page_private(page, (unsigned long)eb);
2831 } else {
2832 WARN_ON(page->private != (unsigned long)eb);
2833 }
2834}
2835
Chris Masond1310b22008-01-24 16:13:08 -05002836void set_page_extent_mapped(struct page *page)
2837{
2838 if (!PagePrivate(page)) {
2839 SetPagePrivate(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002840 get_page(page);
Chris Mason6af118ce2008-07-22 11:18:07 -04002841 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05002842 }
2843}
2844
Miao Xie125bac012013-07-25 19:22:37 +08002845static struct extent_map *
2846__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2847 u64 start, u64 len, get_extent_t *get_extent,
2848 struct extent_map **em_cached)
2849{
2850 struct extent_map *em;
2851
2852 if (em_cached && *em_cached) {
2853 em = *em_cached;
Filipe Mananacbc0e922014-02-25 14:15:12 +00002854 if (extent_map_in_tree(em) && start >= em->start &&
Miao Xie125bac012013-07-25 19:22:37 +08002855 start < extent_map_end(em)) {
Elena Reshetova490b54d2017-03-03 10:55:12 +02002856 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08002857 return em;
2858 }
2859
2860 free_extent_map(em);
2861 *em_cached = NULL;
2862 }
2863
Nikolay Borisovfc4f21b12017-02-20 13:51:06 +02002864 em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
Miao Xie125bac012013-07-25 19:22:37 +08002865 if (em_cached && !IS_ERR_OR_NULL(em)) {
2866 BUG_ON(*em_cached);
Elena Reshetova490b54d2017-03-03 10:55:12 +02002867 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08002868 *em_cached = em;
2869 }
2870 return em;
2871}
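
/*
 * The em_cached argument lets a run of contiguous pages share one extent
 * map: as long as each page's range still falls inside the cached em,
 * only its refcount is bumped instead of repeating the extent lookup for
 * every page.
 */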
Chris Masond1310b22008-01-24 16:13:08 -05002872/*
2873 * basic readpage implementation. Locked extent state structs are inserted
2874 * into the tree and removed when the IO is done (by the end_io
2875 * handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002876 * XXX JDM: This needs looking at to ensure proper page locking
Liu Bobaf863b2016-07-11 10:39:07 -07002877 * return 0 on success, otherwise return error
Chris Masond1310b22008-01-24 16:13:08 -05002878 */
Miao Xie99740902013-07-25 19:22:36 +08002879static int __do_readpage(struct extent_io_tree *tree,
2880 struct page *page,
2881 get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08002882 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08002883 struct bio **bio, int mirror_num,
David Sterbaf1c77c52017-06-06 19:03:49 +02002884 unsigned long *bio_flags, unsigned int read_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01002885 u64 *prev_em_start)
Chris Masond1310b22008-01-24 16:13:08 -05002886{
2887 struct inode *inode = page->mapping->host;
Miao Xie4eee4fa2012-12-21 09:17:45 +00002888 u64 start = page_offset(page);
David Sterba8eec8292017-06-06 19:50:13 +02002889 const u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002890 u64 cur = start;
2891 u64 extent_offset;
2892 u64 last_byte = i_size_read(inode);
2893 u64 block_start;
2894 u64 cur_end;
Chris Masond1310b22008-01-24 16:13:08 -05002895 struct extent_map *em;
2896 struct block_device *bdev;
Liu Bobaf863b2016-07-11 10:39:07 -07002897 int ret = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002898 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02002899 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002900 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04002901 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05002902 size_t blocksize = inode->i_sb->s_blocksize;
Filipe Manana7f042a82016-01-27 19:17:20 +00002903 unsigned long this_bio_flag = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002904
2905 set_page_extent_mapped(page);
2906
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002907 if (!PageUptodate(page)) {
2908 if (cleancache_get_page(page) == 0) {
2909 BUG_ON(blocksize != PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08002910 unlock_extent(tree, start, end);
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002911 goto out;
2912 }
2913 }
2914
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002915 if (page->index == last_byte >> PAGE_SHIFT) {
Chris Masonc8b97812008-10-29 14:49:59 -04002916 char *userpage;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002917 size_t zero_offset = last_byte & (PAGE_SIZE - 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002918
2919 if (zero_offset) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002920 iosize = PAGE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002921 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04002922 memset(userpage + zero_offset, 0, iosize);
2923 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002924 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04002925 }
2926 }
Chris Masond1310b22008-01-24 16:13:08 -05002927 while (cur <= end) {
Filipe Manana005efed2015-09-14 09:09:31 +01002928 bool force_bio_submit = false;
David Sterba6273b7f2017-10-04 17:30:11 +02002929 u64 offset;
Josef Bacikc8f2f242013-02-11 11:33:00 -05002930
Chris Masond1310b22008-01-24 16:13:08 -05002931 if (cur >= last_byte) {
2932 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002933 struct extent_state *cached = NULL;
2934
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002935 iosize = PAGE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002936 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002937 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002938 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002939 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002940 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002941 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00002942 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01002943 cur + iosize - 1, &cached);
Chris Masond1310b22008-01-24 16:13:08 -05002944 break;
2945 }
Miao Xie125bac012013-07-25 19:22:37 +08002946 em = __get_extent_map(inode, page, pg_offset, cur,
2947 end - cur + 1, get_extent, em_cached);
David Sterbac7040052011-04-19 18:00:01 +02002948 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002949 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00002950 unlock_extent(tree, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05002951 break;
2952 }
Chris Masond1310b22008-01-24 16:13:08 -05002953 extent_offset = cur - em->start;
2954 BUG_ON(extent_map_end(em) <= cur);
2955 BUG_ON(end < cur);
2956
Li Zefan261507a02010-12-17 14:21:50 +08002957 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Mark Fasheh4b384312013-08-06 11:42:50 -07002958 this_bio_flag |= EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08002959 extent_set_compress_type(&this_bio_flag,
2960 em->compress_type);
2961 }
Chris Masonc8b97812008-10-29 14:49:59 -04002962
Chris Masond1310b22008-01-24 16:13:08 -05002963 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2964 cur_end = min(extent_map_end(em) - 1, end);
Qu Wenruofda28322013-02-26 08:10:22 +00002965 iosize = ALIGN(iosize, blocksize);
Chris Masonc8b97812008-10-29 14:49:59 -04002966 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2967 disk_io_size = em->block_len;
David Sterba6273b7f2017-10-04 17:30:11 +02002968 offset = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04002969 } else {
David Sterba6273b7f2017-10-04 17:30:11 +02002970 offset = em->block_start + extent_offset;
Chris Masonc8b97812008-10-29 14:49:59 -04002971 disk_io_size = iosize;
2972 }
Chris Masond1310b22008-01-24 16:13:08 -05002973 bdev = em->bdev;
2974 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04002975 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2976 block_start = EXTENT_MAP_HOLE;
Filipe Manana005efed2015-09-14 09:09:31 +01002977
2978 /*
2979 * If we have a file range that points to a compressed extent
2980 * and it's followed by a consecutive file range that points to
2981 * to the same compressed extent (possibly with a different
2982 * offset and/or length, so it either points to the whole extent
2983 * or only part of it), we must make sure we do not submit a
2984 * single bio to populate the pages for the 2 ranges because
2985 * this makes the compressed extent read zero out the pages
2986 * belonging to the 2nd range. Imagine the following scenario:
2987 *
2988 * File layout
2989 * [0 - 8K] [8K - 24K]
2990 * | |
2991 * | |
2992 * points to extent X, points to extent X,
2993 * offset 4K, length of 8K offset 0, length 16K
2994 *
2995 * [extent X, compressed length = 4K uncompressed length = 16K]
2996 *
2997 * If the bio to read the compressed extent covers both ranges,
2998 * it will decompress extent X into the pages belonging to the
2999 * first range and then it will stop, zeroing out the remaining
3000 * pages that belong to the other range that points to extent X.
3001 * So here we make sure we submit 2 bios, one for the first
3002 * range and another one for the second range. Both will target
3003 * the same physical extent from disk, but we can't currently
3004 * make the compressed bio endio callback populate the pages
3005 * for both ranges because each compressed bio is tightly
3006 * coupled with a single extent map, and each range can have
3007 * an extent map with a different offset value relative to the
3008 * uncompressed data of our extent and different lengths. This
3009 * is a corner case so we prioritize correctness over
3010 * non-optimal behavior (submitting 2 bios for the same extent).
3011 */
3012 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3013 prev_em_start && *prev_em_start != (u64)-1 &&
3014 *prev_em_start != em->orig_start)
3015 force_bio_submit = true;
3016
3017 if (prev_em_start)
3018 *prev_em_start = em->orig_start;
3019
Chris Masond1310b22008-01-24 16:13:08 -05003020 free_extent_map(em);
3021 em = NULL;
3022
3023 /* we've found a hole, just zero and go on */
3024 if (block_start == EXTENT_MAP_HOLE) {
3025 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003026 struct extent_state *cached = NULL;
3027
Cong Wang7ac687d2011-11-25 23:14:28 +08003028 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003029 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003030 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003031 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003032
3033 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003034 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003035 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003036 cur + iosize - 1, &cached);
Chris Masond1310b22008-01-24 16:13:08 -05003037 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003038 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003039 continue;
3040 }
3041 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04003042 if (test_range_bit(tree, cur, cur_end,
3043 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04003044 check_page_uptodate(tree, page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003045 unlock_extent(tree, cur, cur + iosize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05003046 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003047 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003048 continue;
3049 }
Chris Mason70dec802008-01-29 09:59:12 -05003050 /* we have an inline extent but it didn't get marked up
3051 * to date. Error out
3052 */
3053 if (block_start == EXTENT_MAP_INLINE) {
3054 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003055 unlock_extent(tree, cur, cur + iosize - 1);
Chris Mason70dec802008-01-29 09:59:12 -05003056 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003057 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05003058 continue;
3059 }
Chris Masond1310b22008-01-24 16:13:08 -05003060
David Sterba4b81ba42017-06-06 19:14:26 +02003061 ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
David Sterba6273b7f2017-10-04 17:30:11 +02003062 page, offset, disk_io_size,
3063 pg_offset, bdev, bio,
Chris Masonc8b97812008-10-29 14:49:59 -04003064 end_bio_extent_readpage, mirror_num,
3065 *bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003066 this_bio_flag,
3067 force_bio_submit);
Josef Bacikc8f2f242013-02-11 11:33:00 -05003068 if (!ret) {
3069 nr++;
3070 *bio_flags = this_bio_flag;
3071 } else {
Chris Masond1310b22008-01-24 16:13:08 -05003072 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003073 unlock_extent(tree, cur, cur + iosize - 1);
Liu Bobaf863b2016-07-11 10:39:07 -07003074 goto out;
Josef Bacikedd33c92012-10-05 16:40:32 -04003075 }
Chris Masond1310b22008-01-24 16:13:08 -05003076 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003077 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003078 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003079out:
Chris Masond1310b22008-01-24 16:13:08 -05003080 if (!nr) {
3081 if (!PageError(page))
3082 SetPageUptodate(page);
3083 unlock_page(page);
3084 }
Liu Bobaf863b2016-07-11 10:39:07 -07003085 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003086}
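
/*
 * Note on the loop above: only ranges backed by a real block mapping
 * reach submit_extent_page(). Holes are zeroed in place, ranges that are
 * already EXTENT_UPTODATE are skipped, and inline extents that were not
 * copied by get_extent() are treated as errors.
 */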
3087
Miao Xie99740902013-07-25 19:22:36 +08003088static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3089 struct page *pages[], int nr_pages,
3090 u64 start, u64 end,
Miao Xie125bac012013-07-25 19:22:37 +08003091 struct extent_map **em_cached,
Nikolay Borisovd3fac6b2017-10-24 11:50:39 +03003092 struct bio **bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05003093 unsigned long *bio_flags,
Filipe Manana808f80b2015-09-28 09:56:26 +01003094 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003095{
3096 struct inode *inode;
3097 struct btrfs_ordered_extent *ordered;
3098 int index;
3099
3100 inode = pages[0]->mapping->host;
3101 while (1) {
3102 lock_extent(tree, start, end);
Nikolay Borisova776c6f2017-02-20 13:50:49 +02003103 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
Miao Xie99740902013-07-25 19:22:36 +08003104 end - start + 1);
3105 if (!ordered)
3106 break;
3107 unlock_extent(tree, start, end);
3108 btrfs_start_ordered_extent(inode, ordered, 1);
3109 btrfs_put_ordered_extent(ordered);
3110 }
3111
3112 for (index = 0; index < nr_pages; index++) {
David Sterba4ef77692017-06-23 04:09:57 +02003113 __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
3114 bio, 0, bio_flags, 0, prev_em_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003115 put_page(pages[index]);
Miao Xie99740902013-07-25 19:22:36 +08003116 }
3117}
3118
3119static void __extent_readpages(struct extent_io_tree *tree,
3120 struct page *pages[],
David Sterbae4d17ef2017-06-23 04:09:57 +02003121 int nr_pages,
Miao Xie125bac012013-07-25 19:22:37 +08003122 struct extent_map **em_cached,
Nikolay Borisovd3fac6b2017-10-24 11:50:39 +03003123 struct bio **bio, unsigned long *bio_flags,
Filipe Manana808f80b2015-09-28 09:56:26 +01003124 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003125{
Stefan Behrens35a36212013-08-14 18:12:25 +02003126 u64 start = 0;
Miao Xie99740902013-07-25 19:22:36 +08003127 u64 end = 0;
3128 u64 page_start;
3129 int index;
Stefan Behrens35a36212013-08-14 18:12:25 +02003130 int first_index = 0;
Miao Xie99740902013-07-25 19:22:36 +08003131
3132 for (index = 0; index < nr_pages; index++) {
3133 page_start = page_offset(pages[index]);
3134 if (!end) {
3135 start = page_start;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003136 end = start + PAGE_SIZE - 1;
Miao Xie99740902013-07-25 19:22:36 +08003137 first_index = index;
3138 } else if (end + 1 == page_start) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003139 end += PAGE_SIZE;
Miao Xie99740902013-07-25 19:22:36 +08003140 } else {
3141 __do_contiguous_readpages(tree, &pages[first_index],
3142 index - first_index, start,
David Sterba4ef77692017-06-23 04:09:57 +02003143 end, em_cached,
Nikolay Borisovd3fac6b2017-10-24 11:50:39 +03003144 bio, bio_flags,
Mike Christie1f7ad752016-06-05 14:31:51 -05003145 prev_em_start);
Miao Xie99740902013-07-25 19:22:36 +08003146 start = page_start;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003147 end = start + PAGE_SIZE - 1;
Miao Xie99740902013-07-25 19:22:36 +08003148 first_index = index;
3149 }
3150 }
3151
3152 if (end)
3153 __do_contiguous_readpages(tree, &pages[first_index],
3154 index - first_index, start,
David Sterba4ef77692017-06-23 04:09:57 +02003155 end, em_cached, bio,
Nikolay Borisovd3fac6b2017-10-24 11:50:39 +03003156 bio_flags, prev_em_start);
Miao Xie99740902013-07-25 19:22:36 +08003157}
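
/*
 * Illustrative example of the batching above, assuming 4K pages: pages at
 * file offsets 0, 4K, 8K, 1M and 1M + 4K produce two calls to
 * __do_contiguous_readpages(), one for [0, 12K - 1] and one for
 * [1M, 1M + 8K - 1], so each contiguous run is locked and read as one
 * unit.
 */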
3158
3159static int __extent_read_full_page(struct extent_io_tree *tree,
3160 struct page *page,
3161 get_extent_t *get_extent,
3162 struct bio **bio, int mirror_num,
David Sterbaf1c77c52017-06-06 19:03:49 +02003163 unsigned long *bio_flags,
3164 unsigned int read_flags)
Miao Xie99740902013-07-25 19:22:36 +08003165{
3166 struct inode *inode = page->mapping->host;
3167 struct btrfs_ordered_extent *ordered;
3168 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003169 u64 end = start + PAGE_SIZE - 1;
Miao Xie99740902013-07-25 19:22:36 +08003170 int ret;
3171
3172 while (1) {
3173 lock_extent(tree, start, end);
Nikolay Borisova776c6f2017-02-20 13:50:49 +02003174 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003175 PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08003176 if (!ordered)
3177 break;
3178 unlock_extent(tree, start, end);
3179 btrfs_start_ordered_extent(inode, ordered, 1);
3180 btrfs_put_ordered_extent(ordered);
3181 }
3182
Miao Xie125bac012013-07-25 19:22:37 +08003183 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
Mike Christie1f7ad752016-06-05 14:31:51 -05003184 bio_flags, read_flags, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003185 return ret;
3186}
3187
Chris Masond1310b22008-01-24 16:13:08 -05003188int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003189 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05003190{
3191 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04003192 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003193 int ret;
3194
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003195 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
Mike Christie1f7ad752016-06-05 14:31:51 -05003196 &bio_flags, 0);
Chris Masond1310b22008-01-24 16:13:08 -05003197 if (bio)
Mike Christie1f7ad752016-06-05 14:31:51 -05003198 ret = submit_one_bio(bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003199 return ret;
3200}
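
/*
 * A minimal sketch of a caller, assuming a readpage address_space hook
 * backed by this tree (the hook name is illustrative):
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent, 0);
 *	}
 */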
Chris Masond1310b22008-01-24 16:13:08 -05003201
David Sterba3d4b9492017-02-10 19:33:41 +01003202static void update_nr_written(struct writeback_control *wbc,
Liu Boa91326672016-03-07 16:56:21 -08003203 unsigned long nr_written)
Chris Mason11c83492009-04-20 15:50:09 -04003204{
3205 wbc->nr_to_write -= nr_written;
Chris Mason11c83492009-04-20 15:50:09 -04003206}
3207
Chris Masond1310b22008-01-24 16:13:08 -05003208/*
Chris Mason40f76582014-05-21 13:35:51 -07003209 * helper for __extent_writepage, doing all of the delayed allocation setup.
3210 *
3211 * This returns 1 if our fill_delalloc function did all the work required
3212 * to write the page (copy into inline extent). In this case the IO has
3213 * been started and the page is already unlocked.
3214 *
3215 * This returns 0 if all went well (page still locked)
3216 * This returns < 0 if there were errors (page still locked)
Chris Masond1310b22008-01-24 16:13:08 -05003217 */
Chris Mason40f76582014-05-21 13:35:51 -07003218static noinline_for_stack int writepage_delalloc(struct inode *inode,
3219 struct page *page, struct writeback_control *wbc,
3220 struct extent_page_data *epd,
3221 u64 delalloc_start,
3222 unsigned long *nr_written)
Chris Masond1310b22008-01-24 16:13:08 -05003223{
Chris Mason40f76582014-05-21 13:35:51 -07003224 struct extent_io_tree *tree = epd->tree;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003225 u64 page_end = delalloc_start + PAGE_SIZE - 1;
Chris Mason40f76582014-05-21 13:35:51 -07003226 u64 nr_delalloc;
3227 u64 delalloc_to_write = 0;
3228 u64 delalloc_end = 0;
3229 int ret;
3230 int page_started = 0;
3231
3232 if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3233 return 0;
3234
3235 while (delalloc_end < page_end) {
3236 nr_delalloc = find_lock_delalloc_range(inode, tree,
3237 page,
3238 &delalloc_start,
3239 &delalloc_end,
Josef Bacikdcab6a32015-02-11 15:08:59 -05003240 BTRFS_MAX_EXTENT_SIZE);
Chris Mason40f76582014-05-21 13:35:51 -07003241 if (nr_delalloc == 0) {
3242 delalloc_start = delalloc_end + 1;
3243 continue;
3244 }
3245 ret = tree->ops->fill_delalloc(inode, page,
3246 delalloc_start,
3247 delalloc_end,
3248 &page_started,
Liu Bof82b7352017-10-23 23:18:16 -06003249 nr_written, wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003250 /* File system has been set read-only */
3251 if (ret) {
3252 SetPageError(page);
3253 /* fill_delalloc should return < 0 for error
3254 * but just in case, we use > 0 here meaning the
3255 * IO is started, so we don't want to return > 0
3256 * unless things are going well.
3257 */
3258 ret = ret < 0 ? ret : -EIO;
3259 goto done;
3260 }
3261 /*
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003262 * delalloc_end is already one less than the total length, so
3263 * we don't subtract one from PAGE_SIZE
Chris Mason40f76582014-05-21 13:35:51 -07003264 */
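		/*
		 * Worked example with 4K pages: for the range [0, 8191],
		 * (8191 - 0 + PAGE_SIZE) >> PAGE_SHIFT is 12287 >> 12 == 2,
		 * exactly the two pages the range covers.
		 */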
3265 delalloc_to_write += (delalloc_end - delalloc_start +
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003266 PAGE_SIZE) >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003267 delalloc_start = delalloc_end + 1;
3268 }
3269 if (wbc->nr_to_write < delalloc_to_write) {
3270 int thresh = 8192;
3271
3272 if (delalloc_to_write < thresh * 2)
3273 thresh = delalloc_to_write;
3274 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3275 thresh);
3276 }
3277
3278 /* did the fill delalloc function already unlock and start
3279 * the IO?
3280 */
3281 if (page_started) {
3282 /*
3283 * we've unlocked the page, so we can't update
3284 * the mapping's writeback index, just update
3285 * nr_to_write.
3286 */
3287 wbc->nr_to_write -= *nr_written;
3288 return 1;
3289 }
3290
3291 ret = 0;
3292
3293done:
3294 return ret;
3295}
3296
3297/*
3298 * helper for __extent_writepage. This calls the writepage start hooks,
3299 * and does the loop to map the page into extents and bios.
3300 *
3301 * We return 1 if the IO is started and the page is unlocked,
3302 * 0 if all went well (page still locked)
3303 * < 0 if there were errors (page still locked)
3304 */
3305static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3306 struct page *page,
3307 struct writeback_control *wbc,
3308 struct extent_page_data *epd,
3309 loff_t i_size,
3310 unsigned long nr_written,
David Sterbaf1c77c52017-06-06 19:03:49 +02003311 unsigned int write_flags, int *nr_ret)
Chris Mason40f76582014-05-21 13:35:51 -07003312{
Chris Masond1310b22008-01-24 16:13:08 -05003313 struct extent_io_tree *tree = epd->tree;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003314 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003315 u64 page_end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003316 u64 end;
3317 u64 cur = start;
3318 u64 extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003319 u64 block_start;
3320 u64 iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003321 struct extent_map *em;
3322 struct block_device *bdev;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003323 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003324 size_t blocksize;
Chris Mason40f76582014-05-21 13:35:51 -07003325 int ret = 0;
3326 int nr = 0;
3327 bool compressed;
Chris Masond1310b22008-01-24 16:13:08 -05003328
Chris Mason247e7432008-07-17 12:53:51 -04003329 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04003330 ret = tree->ops->writepage_start_hook(page, start,
3331 page_end);
Jeff Mahoney87826df2012-02-15 16:23:57 +01003332 if (ret) {
3333 /* Fixup worker will requeue */
3334 if (ret == -EBUSY)
3335 wbc->pages_skipped++;
3336 else
3337 redirty_page_for_writepage(wbc, page);
Chris Mason40f76582014-05-21 13:35:51 -07003338
David Sterba3d4b9492017-02-10 19:33:41 +01003339 update_nr_written(wbc, nr_written);
Chris Mason247e7432008-07-17 12:53:51 -04003340 unlock_page(page);
Liu Bobcf93482017-01-25 17:15:54 -08003341 return 1;
Chris Mason247e7432008-07-17 12:53:51 -04003342 }
3343 }
3344
Chris Mason11c83492009-04-20 15:50:09 -04003345 /*
3346 * we don't want to touch the inode after unlocking the page,
3347 * so we update the mapping writeback index now
3348 */
David Sterba3d4b9492017-02-10 19:33:41 +01003349 update_nr_written(wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05003350
Chris Masond1310b22008-01-24 16:13:08 -05003351 end = page_end;
Chris Mason40f76582014-05-21 13:35:51 -07003352 if (i_size <= start) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04003353 if (tree->ops && tree->ops->writepage_end_io_hook)
3354 tree->ops->writepage_end_io_hook(page, start,
3355 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003356 goto done;
3357 }
3358
Chris Masond1310b22008-01-24 16:13:08 -05003359 blocksize = inode->i_sb->s_blocksize;
3360
3361 while (cur <= end) {
Chris Mason40f76582014-05-21 13:35:51 -07003362 u64 em_end;
David Sterba6273b7f2017-10-04 17:30:11 +02003363 u64 offset;
David Sterba58409ed2016-05-04 11:46:10 +02003364
Chris Mason40f76582014-05-21 13:35:51 -07003365 if (cur >= i_size) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04003366 if (tree->ops && tree->ops->writepage_end_io_hook)
3367 tree->ops->writepage_end_io_hook(page, cur,
3368 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003369 break;
3370 }
David Sterba3c98c622017-06-23 04:01:08 +02003371 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05003372 end - cur + 1, 1);
David Sterbac7040052011-04-19 18:00:01 +02003373 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003374 SetPageError(page);
Filipe Manana61391d52014-05-09 17:17:40 +01003375 ret = PTR_ERR_OR_ZERO(em);
Chris Masond1310b22008-01-24 16:13:08 -05003376 break;
3377 }
3378
3379 extent_offset = cur - em->start;
Chris Mason40f76582014-05-21 13:35:51 -07003380 em_end = extent_map_end(em);
3381 BUG_ON(em_end <= cur);
Chris Masond1310b22008-01-24 16:13:08 -05003382 BUG_ON(end < cur);
Chris Mason40f76582014-05-21 13:35:51 -07003383 iosize = min(em_end - cur, end - cur + 1);
Qu Wenruofda28322013-02-26 08:10:22 +00003384 iosize = ALIGN(iosize, blocksize);
David Sterba6273b7f2017-10-04 17:30:11 +02003385 offset = em->block_start + extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003386 bdev = em->bdev;
3387 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003388 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05003389 free_extent_map(em);
3390 em = NULL;
3391
Chris Masonc8b97812008-10-29 14:49:59 -04003392 /*
3393 * compressed and inline extents are written through other
3394 * paths in the FS
3395 */
3396 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05003397 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04003398 /*
3399 * end_io notification does not happen here for
3400 * compressed extents
3401 */
3402 if (!compressed && tree->ops &&
3403 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04003404 tree->ops->writepage_end_io_hook(page, cur,
3405 cur + iosize - 1,
3406 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04003407 else if (compressed) {
3408 /* we don't want to end_page_writeback on
3409 * a compressed extent. this happens
3410 * elsewhere
3411 */
3412 nr++;
3413 }
3414
3415 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003416 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003417 continue;
3418 }
Chris Masonc8b97812008-10-29 14:49:59 -04003419
David Sterba58409ed2016-05-04 11:46:10 +02003420 set_range_writeback(tree, cur, cur + iosize - 1);
3421 if (!PageWriteback(page)) {
3422 btrfs_err(BTRFS_I(inode)->root->fs_info,
3423 "page %lu not writeback, cur %llu end %llu",
3424 page->index, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003425 }
David Sterba58409ed2016-05-04 11:46:10 +02003426
David Sterba4b81ba42017-06-06 19:14:26 +02003427 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02003428 page, offset, iosize, pg_offset,
David Sterbac2df8bb2017-02-10 19:29:38 +01003429 bdev, &epd->bio,
David Sterba58409ed2016-05-04 11:46:10 +02003430 end_bio_extent_writepage,
3431 0, 0, 0, false);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003432 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003433 SetPageError(page);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003434 if (PageWriteback(page))
3435 end_page_writeback(page);
3436 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003437
Chris Masond1310b22008-01-24 16:13:08 -05003438 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003439 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003440 nr++;
3441 }
3442done:
Chris Mason40f76582014-05-21 13:35:51 -07003443 *nr_ret = nr;
Chris Mason40f76582014-05-21 13:35:51 -07003444 return ret;
3445}
3446
3447/*
3448 * the writepage semantics are similar to regular writepage. extent
3449 * records are inserted to lock ranges in the tree, and as dirty areas
3450 * are found, they are marked writeback. Then the lock bits are removed
3451 * and the end_io handler clears the writeback ranges
3452 */
3453static int __extent_writepage(struct page *page, struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01003454 struct extent_page_data *epd)
Chris Mason40f76582014-05-21 13:35:51 -07003455{
3456 struct inode *inode = page->mapping->host;
Chris Mason40f76582014-05-21 13:35:51 -07003457 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003458 u64 page_end = start + PAGE_SIZE - 1;
Chris Mason40f76582014-05-21 13:35:51 -07003459 int ret;
3460 int nr = 0;
3461 size_t pg_offset = 0;
3462 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003463 unsigned long end_index = i_size >> PAGE_SHIFT;
David Sterbaf1c77c52017-06-06 19:03:49 +02003464 unsigned int write_flags = 0;
Chris Mason40f76582014-05-21 13:35:51 -07003465 unsigned long nr_written = 0;
3466
Liu Boff40adf2017-08-24 18:19:48 -06003467 write_flags = wbc_to_write_flags(wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003468
3469 trace___extent_writepage(page, inode, wbc);
3470
3471 WARN_ON(!PageLocked(page));
3472
3473 ClearPageError(page);
3474
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003475 pg_offset = i_size & (PAGE_SIZE - 1);
Chris Mason40f76582014-05-21 13:35:51 -07003476 if (page->index > end_index ||
3477 (page->index == end_index && !pg_offset)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003478 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
Chris Mason40f76582014-05-21 13:35:51 -07003479 unlock_page(page);
3480 return 0;
3481 }
3482
3483 if (page->index == end_index) {
3484 char *userpage;
3485
3486 userpage = kmap_atomic(page);
3487 memset(userpage + pg_offset, 0,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003488 PAGE_SIZE - pg_offset);
Chris Mason40f76582014-05-21 13:35:51 -07003489 kunmap_atomic(userpage);
3490 flush_dcache_page(page);
3491 }
3492
3493 pg_offset = 0;
3494
3495 set_page_extent_mapped(page);
3496
3497 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
3498 if (ret == 1)
3499 goto done_unlocked;
3500 if (ret)
3501 goto done;
3502
3503 ret = __extent_writepage_io(inode, page, wbc, epd,
3504 i_size, nr_written, write_flags, &nr);
3505 if (ret == 1)
3506 goto done_unlocked;
3507
3508done:
Chris Masond1310b22008-01-24 16:13:08 -05003509 if (nr == 0) {
3510 /* make sure the mapping tag for page dirty gets cleared */
3511 set_page_writeback(page);
3512 end_page_writeback(page);
3513 }
Filipe Manana61391d52014-05-09 17:17:40 +01003514 if (PageError(page)) {
3515 ret = ret < 0 ? ret : -EIO;
3516 end_extent_writepage(page, ret, start, page_end);
3517 }
Chris Masond1310b22008-01-24 16:13:08 -05003518 unlock_page(page);
Chris Mason40f76582014-05-21 13:35:51 -07003519 return ret;
Chris Mason771ed682008-11-06 22:02:51 -05003520
Chris Mason11c83492009-04-20 15:50:09 -04003521done_unlocked:
Chris Masond1310b22008-01-24 16:13:08 -05003522 return 0;
3523}
3524
Josef Bacikfd8b2b62013-04-24 16:41:19 -04003525void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003526{
NeilBrown74316202014-07-07 15:16:04 +10003527 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3528 TASK_UNINTERRUPTIBLE);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003529}
3530
Chris Mason0e378df2014-05-19 20:55:27 -07003531static noinline_for_stack int
3532lock_extent_buffer_for_io(struct extent_buffer *eb,
3533 struct btrfs_fs_info *fs_info,
3534 struct extent_page_data *epd)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003535{
3536 unsigned long i, num_pages;
3537 int flush = 0;
3538 int ret = 0;
3539
3540 if (!btrfs_try_tree_write_lock(eb)) {
3541 flush = 1;
3542 flush_write_bio(epd);
3543 btrfs_tree_lock(eb);
3544 }
3545
3546 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3547 btrfs_tree_unlock(eb);
3548 if (!epd->sync_io)
3549 return 0;
3550 if (!flush) {
3551 flush_write_bio(epd);
3552 flush = 1;
3553 }
Chris Masona098d8e82012-03-21 12:09:56 -04003554 while (1) {
3555 wait_on_extent_buffer_writeback(eb);
3556 btrfs_tree_lock(eb);
3557 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3558 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003559 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003560 }
3561 }
3562
Josef Bacik51561ff2012-07-20 16:25:24 -04003563 /*
3564	 * We need to do this to prevent races with anyone who checks if the eb
3565	 * is under IO, since we can end up having no IO bits set for a short
3566	 * period of time.
3567 */
3568 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003569 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3570 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04003571 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003572 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
Nikolay Borisov104b4e52017-06-20 21:01:20 +03003573 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3574 -eb->len,
3575 fs_info->dirty_metadata_batch);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003576 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04003577 } else {
3578 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003579 }
3580
3581 btrfs_tree_unlock(eb);
3582
3583 if (!ret)
3584 return ret;
3585
3586 num_pages = num_extent_pages(eb->start, eb->len);
3587 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003588 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003589
3590 if (!trylock_page(p)) {
3591 if (!flush) {
3592 flush_write_bio(epd);
3593 flush = 1;
3594 }
3595 lock_page(p);
3596 }
3597 }
3598
3599 return ret;
3600}
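
/*
 * Editor's sketch (not part of the original file): the intended pairing of
 * lock_extent_buffer_for_io() with write_one_eb(), roughly as
 * btree_write_cache_pages() does further below; everything other than the
 * two helpers is assumed surrounding context.
 *
 *	ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
 *	if (!ret) {
 *		free_extent_buffer(eb);
 *		continue;
 *	}
 *	ret = write_one_eb(eb, fs_info, wbc, &epd);
 *	free_extent_buffer(eb);
 *
 * A return of 1 means the buffer is now flagged EXTENT_BUFFER_WRITEBACK
 * with all of its pages locked and must be submitted; 0 means it was clean
 * or a racing writeback already handled it.
 */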
3601
3602static void end_extent_buffer_writeback(struct extent_buffer *eb)
3603{
3604 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003605 smp_mb__after_atomic();
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003606 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3607}
3608
Filipe Manana656f30d2014-09-26 12:25:56 +01003609static void set_btree_ioerr(struct page *page)
3610{
3611 struct extent_buffer *eb = (struct extent_buffer *)page->private;
Filipe Manana656f30d2014-09-26 12:25:56 +01003612
3613 SetPageError(page);
3614 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3615 return;
3616
3617 /*
3618 * If writeback for a btree extent that doesn't belong to a log tree
3619	 * failed, set BTRFS_FS_BTREE_ERR in fs_info->flags (see the switch below).
3620 * We do this because while the transaction is running and before it's
3621 * committing (when we call filemap_fdata[write|wait]_range against
3622 * the btree inode), we might have
3623 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3624 * returns an error or an error happens during writeback, when we're
3625 * committing the transaction we wouldn't know about it, since the pages
3626	 * may no longer be dirty or marked for writeback (if a
3627 * subsequent modification to the extent buffer didn't happen before the
3628 * transaction commit), which makes filemap_fdata[write|wait]_range not
3629 * able to find the pages tagged with SetPageError at transaction
3630 * commit time. So if this happens we must abort the transaction,
3631 * otherwise we commit a super block with btree roots that point to
3632 * btree nodes/leafs whose content on disk is invalid - either garbage
3633 * or the content of some node/leaf from a past generation that got
3634 * cowed or deleted and is no longer valid.
3635 *
3636 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3637 * not be enough - we need to distinguish between log tree extents vs
3638 * non-log tree extents, and the next filemap_fdatawait_range() call
3639 * will catch and clear such errors in the mapping - and that call might
3640 * be from a log sync and not from a transaction commit. Also, checking
3641 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3642 * not done and would not be reliable - the eb might have been released
3643 * from memory and reading it back again means that flag would not be
3644 * set (since it's a runtime flag, not persisted on disk).
3645 *
3646	 * Using the flags below in the btree inode also covers the case where
3647	 * writepages() returns success after having started writeback for all
3648	 * dirty pages, but the writeback then finishes with errors before
3649	 * filemap_fdatawait_range() is called: because we were not using
3650	 * AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return success,
3651	 * as it could not know that writeback errors happened (the pages were
3652	 * no longer tagged for writeback).
3654 */
3655 switch (eb->log_index) {
3656 case -1:
Josef Bacikafcdd122016-09-02 15:40:02 -04003657 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003658 break;
3659 case 0:
Josef Bacikafcdd122016-09-02 15:40:02 -04003660 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003661 break;
3662 case 1:
Josef Bacikafcdd122016-09-02 15:40:02 -04003663 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003664 break;
3665 default:
3666 BUG(); /* unexpected, logic error */
3667 }
3668}
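
/*
 * Editor's sketch of the consumer side (hedged; the exact call sites live
 * in the transaction commit and log-sync code and may differ from this):
 * at commit or log-sync time the corresponding flag is tested and cleared,
 * roughly as in
 *
 *	if (test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
 *		ret = -EIO;
 *
 * so a writeback error recorded here is noticed even after the failed
 * pages have stopped being tagged for writeback.
 */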
3669
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003670static void end_bio_extent_buffer_writepage(struct bio *bio)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003671{
Kent Overstreet2c30c712013-11-07 12:20:26 -08003672 struct bio_vec *bvec;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003673 struct extent_buffer *eb;
Kent Overstreet2c30c712013-11-07 12:20:26 -08003674 int i, done;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003675
David Sterbac09abff2017-07-13 18:10:07 +02003676 ASSERT(!bio_flagged(bio, BIO_CLONED));
Kent Overstreet2c30c712013-11-07 12:20:26 -08003677 bio_for_each_segment_all(bvec, bio, i) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003678 struct page *page = bvec->bv_page;
3679
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003680 eb = (struct extent_buffer *)page->private;
3681 BUG_ON(!eb);
3682 done = atomic_dec_and_test(&eb->io_pages);
3683
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02003684 if (bio->bi_status ||
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003685 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003686 ClearPageUptodate(page);
Filipe Manana656f30d2014-09-26 12:25:56 +01003687 set_btree_ioerr(page);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003688 }
3689
3690 end_page_writeback(page);
3691
3692 if (!done)
3693 continue;
3694
3695 end_extent_buffer_writeback(eb);
Kent Overstreet2c30c712013-11-07 12:20:26 -08003696 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003697
3698 bio_put(bio);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003699}
3700
Chris Mason0e378df2014-05-19 20:55:27 -07003701static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003702 struct btrfs_fs_info *fs_info,
3703 struct writeback_control *wbc,
3704 struct extent_page_data *epd)
3705{
3706 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
Josef Bacikf28491e2013-12-16 13:24:27 -05003707 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003708 u64 offset = eb->start;
Liu Bo851cd172016-09-23 13:44:44 -07003709 u32 nritems;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003710 unsigned long i, num_pages;
Liu Bo851cd172016-09-23 13:44:44 -07003711 unsigned long start, end;
Liu Boff40adf2017-08-24 18:19:48 -06003712 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04003713 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003714
Filipe Manana656f30d2014-09-26 12:25:56 +01003715 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003716 num_pages = num_extent_pages(eb->start, eb->len);
3717 atomic_set(&eb->io_pages, num_pages);
Josef Bacikde0022b2012-09-25 14:25:58 -04003718
Liu Bo851cd172016-09-23 13:44:44 -07003719 /* set btree blocks beyond nritems with 0 to avoid stale content. */
3720 nritems = btrfs_header_nritems(eb);
Liu Bo3eb548e2016-09-14 17:22:57 -07003721 if (btrfs_header_level(eb) > 0) {
Liu Bo3eb548e2016-09-14 17:22:57 -07003722 end = btrfs_node_key_ptr_offset(nritems);
3723
David Sterbab159fa22016-11-08 18:09:03 +01003724 memzero_extent_buffer(eb, end, eb->len - end);
Liu Bo851cd172016-09-23 13:44:44 -07003725 } else {
3726 /*
3727 * leaf:
3728 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3729 */
3730 start = btrfs_item_nr_offset(nritems);
Nikolay Borisov3d9ec8c2017-05-29 09:43:43 +03003731 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, eb);
David Sterbab159fa22016-11-08 18:09:03 +01003732 memzero_extent_buffer(eb, start, end - start);
Liu Bo3eb548e2016-09-14 17:22:57 -07003733 }
3734
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003735 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003736 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003737
3738 clear_page_dirty_for_io(p);
3739 set_page_writeback(p);
David Sterba4b81ba42017-06-06 19:14:26 +02003740 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02003741 p, offset, PAGE_SIZE, 0, bdev,
David Sterbac2df8bb2017-02-10 19:29:38 +01003742 &epd->bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05003743 end_bio_extent_buffer_writepage,
Liu Bo18fdc672017-09-13 12:18:22 -06003744 0, 0, 0, false);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003745 if (ret) {
Filipe Manana656f30d2014-09-26 12:25:56 +01003746 set_btree_ioerr(p);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003747 if (PageWriteback(p))
3748 end_page_writeback(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003749 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3750 end_extent_buffer_writeback(eb);
3751 ret = -EIO;
3752 break;
3753 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003754 offset += PAGE_SIZE;
David Sterba3d4b9492017-02-10 19:33:41 +01003755 update_nr_written(wbc, 1);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003756 unlock_page(p);
3757 }
3758
3759 if (unlikely(ret)) {
3760 for (; i < num_pages; i++) {
Chris Masonbbf65cf2014-10-04 09:56:45 -07003761 struct page *p = eb->pages[i];
Liu Bo81465022014-09-23 22:22:33 +08003762 clear_page_dirty_for_io(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003763 unlock_page(p);
3764 }
3765 }
3766
3767 return ret;
3768}
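
/*
 * Worked example for the zeroing in write_one_eb() above (editor's
 * illustration, with made-up numbers): in a leaf, item headers grow
 * forward from btrfs_item_nr_offset(0) while item data grows backward
 * from the end of the block. With nritems = 3, the dead space is
 *
 *	[btrfs_item_nr_offset(3), BTRFS_LEAF_DATA_OFFSET + data_end)
 *
 * and memzero_extent_buffer() clears exactly that gap, so stale bytes
 * from a previous, fuller version of the leaf never reach disk. For an
 * interior node, everything past btrfs_node_key_ptr_offset(nritems) is
 * cleared instead.
 */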
3769
3770int btree_write_cache_pages(struct address_space *mapping,
3771 struct writeback_control *wbc)
3772{
3773 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3774 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3775 struct extent_buffer *eb, *prev_eb = NULL;
3776 struct extent_page_data epd = {
3777 .bio = NULL,
3778 .tree = tree,
3779 .extent_locked = 0,
3780 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3781 };
3782 int ret = 0;
3783 int done = 0;
3784 int nr_to_write_done = 0;
3785 struct pagevec pvec;
3786 int nr_pages;
3787 pgoff_t index;
3788 pgoff_t end; /* Inclusive */
3789 int scanned = 0;
3790 int tag;
3791
Mel Gorman86679822017-11-15 17:37:52 -08003792 pagevec_init(&pvec);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003793 if (wbc->range_cyclic) {
3794 index = mapping->writeback_index; /* Start from prev offset */
3795 end = -1;
3796 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003797 index = wbc->range_start >> PAGE_SHIFT;
3798 end = wbc->range_end >> PAGE_SHIFT;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003799 scanned = 1;
3800 }
3801 if (wbc->sync_mode == WB_SYNC_ALL)
3802 tag = PAGECACHE_TAG_TOWRITE;
3803 else
3804 tag = PAGECACHE_TAG_DIRTY;
3805retry:
3806 if (wbc->sync_mode == WB_SYNC_ALL)
3807 tag_pages_for_writeback(mapping, index, end);
3808 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara4006f432017-11-15 17:34:37 -08003809 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
Jan Kara67fd7072017-11-15 17:35:19 -08003810 tag))) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003811 unsigned i;
3812
3813 scanned = 1;
3814 for (i = 0; i < nr_pages; i++) {
3815 struct page *page = pvec.pages[i];
3816
3817 if (!PagePrivate(page))
3818 continue;
3819
Josef Bacikb5bae262012-09-14 13:43:01 -04003820 spin_lock(&mapping->private_lock);
3821 if (!PagePrivate(page)) {
3822 spin_unlock(&mapping->private_lock);
3823 continue;
3824 }
3825
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003826 eb = (struct extent_buffer *)page->private;
Josef Bacikb5bae262012-09-14 13:43:01 -04003827
3828 /*
3829 * Shouldn't happen and normally this would be a BUG_ON
3830			 * but no sense in crashing the user's box for something
3831 * we can survive anyway.
3832 */
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05303833 if (WARN_ON(!eb)) {
Josef Bacikb5bae262012-09-14 13:43:01 -04003834 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003835 continue;
3836 }
3837
Josef Bacikb5bae262012-09-14 13:43:01 -04003838 if (eb == prev_eb) {
3839 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003840 continue;
3841 }
3842
Josef Bacikb5bae262012-09-14 13:43:01 -04003843 ret = atomic_inc_not_zero(&eb->refs);
3844 spin_unlock(&mapping->private_lock);
3845 if (!ret)
3846 continue;
3847
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003848 prev_eb = eb;
3849 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3850 if (!ret) {
3851 free_extent_buffer(eb);
3852 continue;
3853 }
3854
3855 ret = write_one_eb(eb, fs_info, wbc, &epd);
3856 if (ret) {
3857 done = 1;
3858 free_extent_buffer(eb);
3859 break;
3860 }
3861 free_extent_buffer(eb);
3862
3863 /*
3864 * the filesystem may choose to bump up nr_to_write.
3865 * We have to make sure to honor the new nr_to_write
3866 * at any time
3867 */
3868 nr_to_write_done = wbc->nr_to_write <= 0;
3869 }
3870 pagevec_release(&pvec);
3871 cond_resched();
3872 }
3873 if (!scanned && !done) {
3874 /*
3875 * We hit the last page and there is more work to be done: wrap
3876 * back to the start of the file
3877 */
3878 scanned = 1;
3879 index = 0;
3880 goto retry;
3881 }
3882 flush_write_bio(&epd);
3883 return ret;
3884}
3885
Chris Masond1310b22008-01-24 16:13:08 -05003886/**
Chris Mason4bef0842008-09-08 11:18:08 -04003887 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05003888 * @mapping: address space structure to write
3889 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
David Sterba935db852017-06-23 04:30:28 +02003890 * @epd: holds context for the write, passed down to __extent_writepage()
Chris Masond1310b22008-01-24 16:13:08 -05003891 *
3892 * If a page is already under I/O, write_cache_pages() skips it, even
3893 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3894 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3895 * and msync() need to guarantee that all the data which was dirty at the time
3896 * the call was made get new I/O started against them. If wbc->sync_mode is
3897 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3898 * existing IO to complete.
3899 */
David Sterba4242b642017-02-10 19:38:24 +01003900static int extent_write_cache_pages(struct address_space *mapping,
Chris Mason4bef0842008-09-08 11:18:08 -04003901 struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01003902 struct extent_page_data *epd)
Chris Masond1310b22008-01-24 16:13:08 -05003903{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003904 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05003905 int ret = 0;
3906 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003907 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003908 struct pagevec pvec;
3909 int nr_pages;
3910 pgoff_t index;
3911 pgoff_t end; /* Inclusive */
Liu Boa91326672016-03-07 16:56:21 -08003912 pgoff_t done_index;
3913 int range_whole = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003914 int scanned = 0;
Josef Bacikf7aaa062011-07-15 21:26:38 +00003915 int tag;
Chris Masond1310b22008-01-24 16:13:08 -05003916
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003917 /*
3918 * We have to hold onto the inode so that ordered extents can do their
3919 * work when the IO finishes. The alternative to this is failing to add
3920 * an ordered extent if the igrab() fails there and that is a huge pain
3921 * to deal with, so instead just hold onto the inode throughout the
3922 * writepages operation. If it fails here we are freeing up the inode
3923 * anyway and we'd rather not waste our time writing out stuff that is
3924 * going to be truncated anyway.
3925 */
3926 if (!igrab(inode))
3927 return 0;
3928
Mel Gorman86679822017-11-15 17:37:52 -08003929 pagevec_init(&pvec);
Chris Masond1310b22008-01-24 16:13:08 -05003930 if (wbc->range_cyclic) {
3931 index = mapping->writeback_index; /* Start from prev offset */
3932 end = -1;
3933 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003934 index = wbc->range_start >> PAGE_SHIFT;
3935 end = wbc->range_end >> PAGE_SHIFT;
Liu Boa91326672016-03-07 16:56:21 -08003936 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3937 range_whole = 1;
Chris Masond1310b22008-01-24 16:13:08 -05003938 scanned = 1;
3939 }
Josef Bacikf7aaa062011-07-15 21:26:38 +00003940 if (wbc->sync_mode == WB_SYNC_ALL)
3941 tag = PAGECACHE_TAG_TOWRITE;
3942 else
3943 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05003944retry:
Josef Bacikf7aaa062011-07-15 21:26:38 +00003945 if (wbc->sync_mode == WB_SYNC_ALL)
3946 tag_pages_for_writeback(mapping, index, end);
Liu Boa91326672016-03-07 16:56:21 -08003947 done_index = index;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003948 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara67fd7072017-11-15 17:35:19 -08003949 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
3950 &index, end, tag))) {
Chris Masond1310b22008-01-24 16:13:08 -05003951 unsigned i;
3952
3953 scanned = 1;
3954 for (i = 0; i < nr_pages; i++) {
3955 struct page *page = pvec.pages[i];
3956
Liu Boa91326672016-03-07 16:56:21 -08003957 done_index = page->index;
Chris Masond1310b22008-01-24 16:13:08 -05003958 /*
3959 * At this point we hold neither mapping->tree_lock nor
3960 * lock on the page itself: the page may be truncated or
3961 * invalidated (changing page->mapping to NULL), or even
3962 * swizzled back from swapper_space to tmpfs file
3963 * mapping
3964 */
Josef Bacikc8f2f242013-02-11 11:33:00 -05003965 if (!trylock_page(page)) {
David Sterbaaab6e9e2017-11-30 18:00:02 +01003966 flush_write_bio(epd);
Josef Bacikc8f2f242013-02-11 11:33:00 -05003967 lock_page(page);
Chris Mason01d658f2011-11-01 10:08:06 -04003968 }
Chris Masond1310b22008-01-24 16:13:08 -05003969
3970 if (unlikely(page->mapping != mapping)) {
3971 unlock_page(page);
3972 continue;
3973 }
3974
Chris Masond2c3f4f2008-11-19 12:44:22 -05003975 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05003976 if (PageWriteback(page))
David Sterbaaab6e9e2017-11-30 18:00:02 +01003977 flush_write_bio(epd);
Chris Masond1310b22008-01-24 16:13:08 -05003978 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003979 }
Chris Masond1310b22008-01-24 16:13:08 -05003980
3981 if (PageWriteback(page) ||
3982 !clear_page_dirty_for_io(page)) {
3983 unlock_page(page);
3984 continue;
3985 }
3986
David Sterbaaab6e9e2017-11-30 18:00:02 +01003987 ret = __extent_writepage(page, wbc, epd);
Chris Masond1310b22008-01-24 16:13:08 -05003988
3989 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3990 unlock_page(page);
3991 ret = 0;
3992 }
Liu Boa91326672016-03-07 16:56:21 -08003993 if (ret < 0) {
3994 /*
3995 * done_index is set past this page,
3996 * so media errors will not choke
3997 * background writeout for the entire
3998 * file. This has consequences for
3999 * range_cyclic semantics (ie. it may
4000 * not be suitable for data integrity
4001 * writeout).
4002 */
4003 done_index = page->index + 1;
4004 done = 1;
4005 break;
4006 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004007
4008 /*
4009 * the filesystem may choose to bump up nr_to_write.
4010 * We have to make sure to honor the new nr_to_write
4011 * at any time
4012 */
4013 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05004014 }
4015 pagevec_release(&pvec);
4016 cond_resched();
4017 }
Liu Bo894b36e2016-03-07 16:56:22 -08004018 if (!scanned && !done) {
Chris Masond1310b22008-01-24 16:13:08 -05004019 /*
4020 * We hit the last page and there is more work to be done: wrap
4021 * back to the start of the file
4022 */
4023 scanned = 1;
4024 index = 0;
4025 goto retry;
4026 }
Liu Boa91326672016-03-07 16:56:21 -08004027
4028 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4029 mapping->writeback_index = done_index;
4030
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004031 btrfs_add_delayed_iput(inode);
Liu Bo894b36e2016-03-07 16:56:22 -08004032 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004033}
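
/*
 * Editor's sketch (assumed caller, not from this file): an integrity sync
 * of a byte range would drive extent_write_cache_pages() with a
 * writeback_control along the lines of
 *
 *	struct writeback_control wbc = {
 *		.sync_mode   = WB_SYNC_ALL,
 *		.nr_to_write = LONG_MAX,
 *		.range_start = start,
 *		.range_end   = end,
 *	};
 *
 * WB_SYNC_ALL selects the PAGECACHE_TAG_TOWRITE tagging above, which keeps
 * tasks that keep re-dirtying pages from livelocking the sweep, and makes
 * the loop wait on pages already under writeback instead of skipping them.
 */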
Chris Masond1310b22008-01-24 16:13:08 -05004034
David Sterbaaab6e9e2017-11-30 18:00:02 +01004035static void flush_write_bio(struct extent_page_data *epd)
Chris Masonffbd5172009-04-20 15:50:09 -04004036{
4037 if (epd->bio) {
Jeff Mahoney355808c2011-10-03 23:23:14 -04004038 int ret;
4039
Liu Bo18fdc672017-09-13 12:18:22 -06004040 ret = submit_one_bio(epd->bio, 0, 0);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01004041 BUG_ON(ret < 0); /* -ENOMEM */
Chris Masonffbd5172009-04-20 15:50:09 -04004042 epd->bio = NULL;
4043 }
4044}
4045
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +02004046int extent_write_full_page(struct page *page, struct writeback_control *wbc)
Chris Masond1310b22008-01-24 16:13:08 -05004047{
4048 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004049 struct extent_page_data epd = {
4050 .bio = NULL,
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +02004051 .tree = &BTRFS_I(page->mapping->host)->io_tree,
Chris Mason771ed682008-11-06 22:02:51 -05004052 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004053 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004054 };
Chris Masond1310b22008-01-24 16:13:08 -05004055
Chris Masond1310b22008-01-24 16:13:08 -05004056 ret = __extent_writepage(page, wbc, &epd);
4057
David Sterbae2932ee2017-06-23 04:16:17 +02004058 flush_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004059 return ret;
4060}
Chris Masond1310b22008-01-24 16:13:08 -05004061
Nikolay Borisov5e3ee232017-12-08 15:55:58 +02004062int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
Chris Mason771ed682008-11-06 22:02:51 -05004063 int mode)
4064{
4065 int ret = 0;
4066 struct address_space *mapping = inode->i_mapping;
Nikolay Borisov5e3ee232017-12-08 15:55:58 +02004067 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Chris Mason771ed682008-11-06 22:02:51 -05004068 struct page *page;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004069 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4070 PAGE_SHIFT;
Chris Mason771ed682008-11-06 22:02:51 -05004071
4072 struct extent_page_data epd = {
4073 .bio = NULL,
4074 .tree = tree,
Chris Mason771ed682008-11-06 22:02:51 -05004075 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04004076 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05004077 };
4078 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05004079 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05004080 .nr_to_write = nr_pages * 2,
4081 .range_start = start,
4082 .range_end = end + 1,
4083 };
4084
Chris Masond3977122009-01-05 21:25:51 -05004085 while (start <= end) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004086 page = find_get_page(mapping, start >> PAGE_SHIFT);
Chris Mason771ed682008-11-06 22:02:51 -05004087 if (clear_page_dirty_for_io(page))
4088 ret = __extent_writepage(page, &wbc_writepages, &epd);
4089 else {
4090 if (tree->ops && tree->ops->writepage_end_io_hook)
4091 tree->ops->writepage_end_io_hook(page, start,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004092 start + PAGE_SIZE - 1,
Chris Mason771ed682008-11-06 22:02:51 -05004093 NULL, 1);
4094 unlock_page(page);
4095 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004096 put_page(page);
4097 start += PAGE_SIZE;
Chris Mason771ed682008-11-06 22:02:51 -05004098 }
4099
David Sterbae2932ee2017-06-23 04:16:17 +02004100 flush_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05004101 return ret;
4102}
Chris Masond1310b22008-01-24 16:13:08 -05004103
4104int extent_writepages(struct extent_io_tree *tree,
4105 struct address_space *mapping,
Chris Masond1310b22008-01-24 16:13:08 -05004106 struct writeback_control *wbc)
4107{
4108 int ret = 0;
4109 struct extent_page_data epd = {
4110 .bio = NULL,
4111 .tree = tree,
Chris Mason771ed682008-11-06 22:02:51 -05004112 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004113 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004114 };
4115
David Sterba935db852017-06-23 04:30:28 +02004116 ret = extent_write_cache_pages(mapping, wbc, &epd);
David Sterbae2932ee2017-06-23 04:16:17 +02004117 flush_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004118 return ret;
4119}
Chris Masond1310b22008-01-24 16:13:08 -05004120
4121int extent_readpages(struct extent_io_tree *tree,
4122 struct address_space *mapping,
David Sterba09325842017-06-23 04:09:57 +02004123 struct list_head *pages, unsigned nr_pages)
Chris Masond1310b22008-01-24 16:13:08 -05004124{
4125 struct bio *bio = NULL;
4126 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04004127 unsigned long bio_flags = 0;
Liu Bo67c96842012-07-20 21:43:09 -06004128 struct page *pagepool[16];
4129 struct page *page;
Miao Xie125bac012013-07-25 19:22:37 +08004130 struct extent_map *em_cached = NULL;
Liu Bo67c96842012-07-20 21:43:09 -06004131 int nr = 0;
Filipe Manana808f80b2015-09-28 09:56:26 +01004132 u64 prev_em_start = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05004133
Chris Masond1310b22008-01-24 16:13:08 -05004134 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
Liu Bo67c96842012-07-20 21:43:09 -06004135 page = list_entry(pages->prev, struct page, lru);
Chris Masond1310b22008-01-24 16:13:08 -05004136
4137 prefetchw(&page->flags);
4138 list_del(&page->lru);
Liu Bo67c96842012-07-20 21:43:09 -06004139 if (add_to_page_cache_lru(page, mapping,
Michal Hocko8a5c7432016-07-26 15:24:53 -07004140 page->index,
4141 readahead_gfp_mask(mapping))) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004142 put_page(page);
Liu Bo67c96842012-07-20 21:43:09 -06004143 continue;
Chris Masond1310b22008-01-24 16:13:08 -05004144 }
Liu Bo67c96842012-07-20 21:43:09 -06004145
4146 pagepool[nr++] = page;
4147 if (nr < ARRAY_SIZE(pagepool))
4148 continue;
David Sterbae4d17ef2017-06-23 04:09:57 +02004149 __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
4150 &bio_flags, &prev_em_start);
Liu Bo67c96842012-07-20 21:43:09 -06004151 nr = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004152 }
Miao Xie99740902013-07-25 19:22:36 +08004153 if (nr)
David Sterbae4d17ef2017-06-23 04:09:57 +02004154 __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
4155 &bio_flags, &prev_em_start);
Liu Bo67c96842012-07-20 21:43:09 -06004156
Miao Xie125bac012013-07-25 19:22:37 +08004157 if (em_cached)
4158 free_extent_map(em_cached);
4159
Chris Masond1310b22008-01-24 16:13:08 -05004160 BUG_ON(!list_empty(pages));
4161 if (bio)
Mike Christie1f7ad752016-06-05 14:31:51 -05004162 return submit_one_bio(bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05004163 return 0;
4164}
Chris Masond1310b22008-01-24 16:13:08 -05004165
4166/*
4167 * basic invalidatepage code, this waits on any locked or writeback
4168 * ranges corresponding to the page, and then deletes any extent state
4169 * records from the tree
4170 */
4171int extent_invalidatepage(struct extent_io_tree *tree,
4172 struct page *page, unsigned long offset)
4173{
Josef Bacik2ac55d42010-02-03 19:33:23 +00004174 struct extent_state *cached_state = NULL;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004175 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004176 u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05004177 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4178
Qu Wenruofda28322013-02-26 08:10:22 +00004179 start += ALIGN(offset, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05004180 if (start > end)
4181 return 0;
4182
David Sterbaff13db42015-12-03 14:30:40 +01004183 lock_extent_bits(tree, start, end, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04004184 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05004185 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04004186 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4187 EXTENT_DO_ACCOUNTING,
David Sterbaae0f1622017-10-31 16:37:52 +01004188 1, 1, &cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05004189 return 0;
4190}
Chris Masond1310b22008-01-24 16:13:08 -05004191
4192/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04004193 * a helper for releasepage, this tests for areas of the page that
4194 * are locked or under IO and drops the related state bits if it is safe
4195 * to drop the page.
4196 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00004197static int try_release_extent_state(struct extent_map_tree *map,
4198 struct extent_io_tree *tree,
4199 struct page *page, gfp_t mask)
Chris Mason7b13b7b2008-04-18 10:29:50 -04004200{
Miao Xie4eee4fa2012-12-21 09:17:45 +00004201 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004202 u64 end = start + PAGE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004203 int ret = 1;
4204
Chris Mason211f90e2008-07-18 11:56:15 -04004205 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04004206 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04004207 ret = 0;
4208 else {
Chris Mason11ef1602009-09-23 20:28:46 -04004209 /*
4210 * at this point we can safely clear everything except the
4211 * locked bit and the nodatasum bit
4212 */
David Sterba66b0c882017-10-31 16:30:47 +01004213 ret = __clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04004214 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
David Sterba66b0c882017-10-31 16:30:47 +01004215 0, 0, NULL, mask, NULL);
Chris Masone3f24cc2011-02-14 12:52:08 -05004216
4217		/* if clear_extent_bit failed for ENOMEM reasons,
4218 * we can't allow the release to continue.
4219 */
4220 if (ret < 0)
4221 ret = 0;
4222 else
4223 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004224 }
4225 return ret;
4226}
Chris Mason7b13b7b2008-04-18 10:29:50 -04004227
4228/*
Chris Masond1310b22008-01-24 16:13:08 -05004229 * a helper for releasepage. As long as there are no locked extents
4230 * in the range corresponding to the page, both state records and extent
4231 * map records are removed
4232 */
4233int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05004234 struct extent_io_tree *tree, struct page *page,
4235 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05004236{
4237 struct extent_map *em;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004238 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004239 u64 end = start + PAGE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004240
Mel Gormand0164ad2015-11-06 16:28:21 -08004241 if (gfpflags_allow_blocking(mask) &&
Byongho Leeee221842015-12-15 01:42:10 +09004242 page->mapping->host->i_size > SZ_16M) {
Yan39b56372008-02-15 10:40:50 -05004243 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05004244 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05004245 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04004246 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05004247 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09004248 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04004249 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004250 break;
4251 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04004252 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4253 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04004254 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004255 free_extent_map(em);
4256 break;
4257 }
4258 if (!test_range_bit(tree, em->start,
4259 extent_map_end(em) - 1,
Chris Mason8b62b722009-09-02 16:53:46 -04004260 EXTENT_LOCKED | EXTENT_WRITEBACK,
Chris Mason9655d292009-09-02 15:22:30 -04004261 0, NULL)) {
Chris Mason70dec802008-01-29 09:59:12 -05004262 remove_extent_mapping(map, em);
4263 /* once for the rb tree */
4264 free_extent_map(em);
4265 }
4266 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04004267 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004268
4269 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05004270 free_extent_map(em);
4271 }
Chris Masond1310b22008-01-24 16:13:08 -05004272 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04004273 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05004274}
Chris Masond1310b22008-01-24 16:13:08 -05004275
Chris Masonec29ed52011-02-23 16:23:20 -05004276/*
4277 * helper function for fiemap, which doesn't want to see any holes.
4278 * This maps until we find something past 'last'
4279 */
4280static struct extent_map *get_extent_skip_holes(struct inode *inode,
David Sterbae3350e12017-06-23 04:09:57 +02004281 u64 offset, u64 last)
Chris Masonec29ed52011-02-23 16:23:20 -05004282{
Jeff Mahoneyda170662016-06-15 09:22:56 -04004283 u64 sectorsize = btrfs_inode_sectorsize(inode);
Chris Masonec29ed52011-02-23 16:23:20 -05004284 struct extent_map *em;
4285 u64 len;
4286
4287 if (offset >= last)
4288 return NULL;
4289
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05304290 while (1) {
Chris Masonec29ed52011-02-23 16:23:20 -05004291 len = last - offset;
4292 if (len == 0)
4293 break;
Qu Wenruofda28322013-02-26 08:10:22 +00004294 len = ALIGN(len, sectorsize);
David Sterbae3350e12017-06-23 04:09:57 +02004295 em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, offset,
4296 len, 0);
David Sterbac7040052011-04-19 18:00:01 +02004297 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05004298 return em;
4299
4300 /* if this isn't a hole return it */
Nikolay Borisov4a2d25c2017-11-23 10:51:43 +02004301 if (em->block_start != EXTENT_MAP_HOLE)
Chris Masonec29ed52011-02-23 16:23:20 -05004302 return em;
Chris Masonec29ed52011-02-23 16:23:20 -05004303
4304 /* this is a hole, advance to the next extent */
4305 offset = extent_map_end(em);
4306 free_extent_map(em);
4307 if (offset >= last)
4308 break;
4309 }
4310 return NULL;
4311}
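
/*
 * Example (editor's illustration): for a file with extent A at [0, 8K), a
 * hole at [8K, 16K) and extent B at [16K, 20K), a call with offset == 8K
 * and last == 20K maps the hole, advances offset to
 * extent_map_end() == 16K and returns the map for B. NULL comes back only
 * if offset reaches 'last' without hitting a non-hole extent.
 */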
4312
Qu Wenruo47518322017-04-07 10:43:15 +08004313/*
4314 * Cache of the previous fiemap extent.
4315 *
4316 * Used for merging consecutive fiemap extents.
4317 */
4318struct fiemap_cache {
4319 u64 offset;
4320 u64 phys;
4321 u64 len;
4322 u32 flags;
4323 bool cached;
4324};
4325
4326/*
4327 * Helper to submit fiemap extent.
4328 *
4329 * Try to merge the current fiemap extent, specified by @offset, @phys,
4330 * @len and @flags, with the cached one.
4331 * Only when the merge fails is the cached extent submitted as a
4332 * fiemap extent.
4333 *
4334 * Return value is the same as fiemap_fill_next_extent().
4335 */
4336static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4337 struct fiemap_cache *cache,
4338 u64 offset, u64 phys, u64 len, u32 flags)
4339{
4340 int ret = 0;
4341
4342 if (!cache->cached)
4343 goto assign;
4344
4345 /*
4346 * Sanity check, extent_fiemap() should have ensured that new
4347 * fiemap extent won't overlap with cahced one.
4348 * Not recoverable.
4349 *
4350 * NOTE: Physical address can overlap, due to compression
4351 */
4352 if (cache->offset + cache->len > offset) {
4353 WARN_ON(1);
4354 return -EINVAL;
4355 }
4356
4357 /*
4358	 * Only merge fiemap extents if
4359	 * 1) Their logical addresses are contiguous
4360	 *
4361	 * 2) Their physical addresses are contiguous
4362	 *    So truly compressed (physical size smaller than logical size)
4363	 *    extents won't get merged with each other
4364	 *
4365	 * 3) They share the same flags except FIEMAP_EXTENT_LAST
4366	 *    So a regular extent won't get merged with a prealloc extent
4367 */
4368 if (cache->offset + cache->len == offset &&
4369 cache->phys + cache->len == phys &&
4370 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4371 (flags & ~FIEMAP_EXTENT_LAST)) {
4372 cache->len += len;
4373 cache->flags |= flags;
4374 goto try_submit_last;
4375 }
4376
4377 /* Not mergeable, need to submit cached one */
4378 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4379 cache->len, cache->flags);
4380 cache->cached = false;
4381 if (ret)
4382 return ret;
4383assign:
4384 cache->cached = true;
4385 cache->offset = offset;
4386 cache->phys = phys;
4387 cache->len = len;
4388 cache->flags = flags;
4389try_submit_last:
4390 if (cache->flags & FIEMAP_EXTENT_LAST) {
4391 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4392 cache->phys, cache->len, cache->flags);
4393 cache->cached = false;
4394 }
4395 return ret;
4396}
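
/*
 * Worked example (editor's note): two physically contiguous 4K extents
 * arrive as
 *
 *	emit_fiemap_extent(fieinfo, &cache, 0, phys, 4096, flags);
 *	emit_fiemap_extent(fieinfo, &cache, 4096, phys + 4096, 4096, flags);
 *
 * The second call satisfies all three merge conditions, so the cache
 * simply grows to len == 8192 and userspace sees a single fiemap record.
 * Had the second extent been compressed (physical size smaller than
 * logical size), condition 2) would fail and two records would be emitted.
 */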
4397
4398/*
Qu Wenruo848c23b2017-06-22 10:01:21 +08004399 * Emit last fiemap cache
Qu Wenruo47518322017-04-07 10:43:15 +08004400 *
Qu Wenruo848c23b2017-06-22 10:01:21 +08004401 * The last fiemap extent may still be cached (not yet emitted) in the following case:
4402 * 0 4k 8k
4403 * |<- Fiemap range ->|
4404 * |<------------ First extent ----------->|
4405 *
4406 * In this case, the first extent range will be cached but not emitted.
4407 * So we must emit it before ending extent_fiemap().
Qu Wenruo47518322017-04-07 10:43:15 +08004408 */
Qu Wenruo848c23b2017-06-22 10:01:21 +08004409static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
4410 struct fiemap_extent_info *fieinfo,
4411 struct fiemap_cache *cache)
Qu Wenruo47518322017-04-07 10:43:15 +08004412{
4413 int ret;
4414
4415 if (!cache->cached)
4416 return 0;
4417
Qu Wenruo47518322017-04-07 10:43:15 +08004418 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4419 cache->len, cache->flags);
4420 cache->cached = false;
4421 if (ret > 0)
4422 ret = 0;
4423 return ret;
4424}
4425
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004426int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
David Sterba2135fb92017-06-23 04:09:57 +02004427 __u64 start, __u64 len)
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004428{
Josef Bacik975f84f2010-11-23 19:36:57 +00004429 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004430 u64 off = start;
4431 u64 max = start + len;
4432 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004433 u32 found_type;
4434 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05004435 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004436 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004437 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00004438 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004439 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00004440 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00004441 struct btrfs_path *path;
Josef Bacikdc046b12014-09-10 16:20:45 -04004442 struct btrfs_root *root = BTRFS_I(inode)->root;
Qu Wenruo47518322017-04-07 10:43:15 +08004443 struct fiemap_cache cache = { 0 };
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004444 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004445 u64 em_start = 0;
4446 u64 em_len = 0;
4447 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004448
4449 if (len == 0)
4450 return -EINVAL;
4451
Josef Bacik975f84f2010-11-23 19:36:57 +00004452 path = btrfs_alloc_path();
4453 if (!path)
4454 return -ENOMEM;
4455 path->leave_spinning = 1;
4456
Jeff Mahoneyda170662016-06-15 09:22:56 -04004457 start = round_down(start, btrfs_inode_sectorsize(inode));
4458 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
Josef Bacik4d479cf2011-11-17 11:34:31 -05004459
Chris Masonec29ed52011-02-23 16:23:20 -05004460 /*
4461 * lookup the last file extent. We're not using i_size here
4462 * because there might be preallocation past i_size
4463 */
David Sterbaf85b7372017-01-20 14:54:07 +01004464 ret = btrfs_lookup_file_extent(NULL, root, path,
4465 btrfs_ino(BTRFS_I(inode)), -1, 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00004466 if (ret < 0) {
4467 btrfs_free_path(path);
4468 return ret;
Liu Bo2d324f52016-05-17 17:21:48 -07004469 } else {
4470 WARN_ON(!ret);
4471 if (ret == 1)
4472 ret = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004473 }
Liu Bo2d324f52016-05-17 17:21:48 -07004474
Josef Bacik975f84f2010-11-23 19:36:57 +00004475 path->slots[0]--;
Josef Bacik975f84f2010-11-23 19:36:57 +00004476 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +02004477 found_type = found_key.type;
Josef Bacik975f84f2010-11-23 19:36:57 +00004478
Chris Masonec29ed52011-02-23 16:23:20 -05004479 /* No extents, but there might be delalloc bits */
Nikolay Borisov4a0cc7c2017-01-10 20:35:31 +02004480 if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00004481 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05004482 /* have to trust i_size as the end */
4483 last = (u64)-1;
4484 last_for_get_extent = isize;
4485 } else {
4486 /*
4487 * remember the start of the last extent. There are a
4488 * bunch of different factors that go into the length of the
4489		 * extent, so it's much less complex to remember where it started
4490 */
4491 last = found_key.offset;
4492 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00004493 }
Liu Bofe09e162013-09-22 12:54:23 +08004494 btrfs_release_path(path);
Josef Bacik975f84f2010-11-23 19:36:57 +00004495
Chris Masonec29ed52011-02-23 16:23:20 -05004496 /*
4497 * we might have some extents allocated but more delalloc past those
4498 * extents. so, we trust isize unless the start of the last extent is
4499 * beyond isize
4500 */
4501 if (last < isize) {
4502 last = (u64)-1;
4503 last_for_get_extent = isize;
4504 }
4505
David Sterbaff13db42015-12-03 14:30:40 +01004506 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01004507 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05004508
David Sterbae3350e12017-06-23 04:09:57 +02004509 em = get_extent_skip_holes(inode, start, last_for_get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004510 if (!em)
4511 goto out;
4512 if (IS_ERR(em)) {
4513 ret = PTR_ERR(em);
4514 goto out;
4515 }
Josef Bacik975f84f2010-11-23 19:36:57 +00004516
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004517 while (!end) {
Josef Bacikb76bb702013-07-05 13:52:51 -04004518 u64 offset_in_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004519
Chris Masonea8efc72011-03-08 11:54:40 -05004520 /* break if the extent we found is outside the range */
4521 if (em->start >= max || extent_map_end(em) < off)
4522 break;
4523
4524 /*
4525 * get_extent may return an extent that starts before our
4526 * requested range. We have to make sure the ranges
4527 * we return to fiemap always move forward and don't
4528 * overlap, so adjust the offsets here
4529 */
4530 em_start = max(em->start, off);
4531
4532 /*
4533 * record the offset from the start of the extent
Josef Bacikb76bb702013-07-05 13:52:51 -04004534 * for adjusting the disk offset below. Only do this if the
4535 * extent isn't compressed since our in ram offset may be past
4536 * what we have actually allocated on disk.
Chris Masonea8efc72011-03-08 11:54:40 -05004537 */
Josef Bacikb76bb702013-07-05 13:52:51 -04004538 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4539 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05004540 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05004541 em_len = em_end - em_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004542 disko = 0;
4543 flags = 0;
4544
Chris Masonea8efc72011-03-08 11:54:40 -05004545 /*
4546 * bump off for our next call to get_extent
4547 */
4548 off = extent_map_end(em);
4549 if (off >= max)
4550 end = 1;
4551
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004552 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004553 end = 1;
4554 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004555 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004556 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4557 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004558 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004559 flags |= (FIEMAP_EXTENT_DELALLOC |
4560 FIEMAP_EXTENT_UNKNOWN);
Josef Bacikdc046b12014-09-10 16:20:45 -04004561 } else if (fieinfo->fi_extents_max) {
4562 u64 bytenr = em->block_start -
4563 (em->start - em->orig_start);
Liu Bofe09e162013-09-22 12:54:23 +08004564
Chris Masonea8efc72011-03-08 11:54:40 -05004565 disko = em->block_start + offset_in_extent;
Liu Bofe09e162013-09-22 12:54:23 +08004566
4567 /*
4568 * As btrfs supports shared space, this information
4569 * can be exported to userspace tools via
Josef Bacikdc046b12014-09-10 16:20:45 -04004570 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4571 * then we're just getting a count and we can skip the
4572 * lookup stuff.
Liu Bofe09e162013-09-22 12:54:23 +08004573 */
Edmund Nadolskibb739cf2017-06-28 21:56:58 -06004574 ret = btrfs_check_shared(root,
4575 btrfs_ino(BTRFS_I(inode)),
4576 bytenr);
Josef Bacikdc046b12014-09-10 16:20:45 -04004577 if (ret < 0)
Liu Bofe09e162013-09-22 12:54:23 +08004578 goto out_free;
Josef Bacikdc046b12014-09-10 16:20:45 -04004579 if (ret)
Liu Bofe09e162013-09-22 12:54:23 +08004580 flags |= FIEMAP_EXTENT_SHARED;
Josef Bacikdc046b12014-09-10 16:20:45 -04004581 ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004582 }
4583 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4584 flags |= FIEMAP_EXTENT_ENCODED;
Josef Bacik0d2b2372015-05-19 10:44:04 -04004585 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4586 flags |= FIEMAP_EXTENT_UNWRITTEN;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004587
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004588 free_extent_map(em);
4589 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05004590 if ((em_start >= last) || em_len == (u64)-1 ||
4591 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004592 flags |= FIEMAP_EXTENT_LAST;
4593 end = 1;
4594 }
4595
Chris Masonec29ed52011-02-23 16:23:20 -05004596 /* now scan forward to see if this is really the last extent. */
David Sterbae3350e12017-06-23 04:09:57 +02004597 em = get_extent_skip_holes(inode, off, last_for_get_extent);
Chris Masonec29ed52011-02-23 16:23:20 -05004598 if (IS_ERR(em)) {
4599 ret = PTR_ERR(em);
4600 goto out;
4601 }
4602 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00004603 flags |= FIEMAP_EXTENT_LAST;
4604 end = 1;
4605 }
Qu Wenruo47518322017-04-07 10:43:15 +08004606 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4607 em_len, flags);
Chengyu Song26e726a2015-03-24 18:12:56 -04004608 if (ret) {
4609 if (ret == 1)
4610 ret = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004611 goto out_free;
Chengyu Song26e726a2015-03-24 18:12:56 -04004612 }
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004613 }
4614out_free:
Qu Wenruo47518322017-04-07 10:43:15 +08004615 if (!ret)
Qu Wenruo848c23b2017-06-22 10:01:21 +08004616 ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004617 free_extent_map(em);
4618out:
Liu Bofe09e162013-09-22 12:54:23 +08004619 btrfs_free_path(path);
Liu Boa52f4cd2013-05-01 16:23:41 +00004620 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
David Sterbae43bbe52017-12-12 21:43:52 +01004621 &cached_state);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004622 return ret;
4623}
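
/*
 * Editor's note on the sector rounding near the top of extent_fiemap():
 * with a 4K sectorsize, a request of start = 5000, len = 5000 (so
 * max = 10000) becomes
 *
 *	start = round_down(5000, 4096)       = 4096
 *	len   = round_up(10000, 4096) - 4096 = 12288 - 4096 = 8192
 *
 * i.e. the queried range is widened to the sector-aligned [4096, 12288).
 */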
4624
Chris Mason727011e2010-08-06 13:21:20 -04004625static void __free_extent_buffer(struct extent_buffer *eb)
4626{
Eric Sandeen6d49ba12013-04-22 16:12:31 +00004627 btrfs_leak_debug_del(&eb->leak_list);
Chris Mason727011e2010-08-06 13:21:20 -04004628 kmem_cache_free(extent_buffer_cache, eb);
4629}
4630
Josef Bacika26e8c92014-03-28 17:07:27 -04004631int extent_buffer_under_io(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004632{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004633 return (atomic_read(&eb->io_pages) ||
4634 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4635 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05004636}
4637
Miao Xie897ca6e92010-10-26 20:57:29 -04004638/*
4639 * Helper for releasing extent buffer page.
4640 */
David Sterbaa50924e2014-07-31 00:51:36 +02004641static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
Miao Xie897ca6e92010-10-26 20:57:29 -04004642{
4643 unsigned long index;
4644 struct page *page;
Jan Schmidt815a51c2012-05-16 17:00:02 +02004645 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
Miao Xie897ca6e92010-10-26 20:57:29 -04004646
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004647 BUG_ON(extent_buffer_under_io(eb));
Miao Xie897ca6e92010-10-26 20:57:29 -04004648
David Sterbaa50924e2014-07-31 00:51:36 +02004649 index = num_extent_pages(eb->start, eb->len);
4650 if (index == 0)
Miao Xie897ca6e92010-10-26 20:57:29 -04004651 return;
4652
4653 do {
4654 index--;
David Sterbafb85fc92014-07-31 01:03:53 +02004655 page = eb->pages[index];
Forrest Liu5d2361d2015-02-09 17:31:45 +08004656 if (!page)
4657 continue;
4658 if (mapped)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004659 spin_lock(&page->mapping->private_lock);
Forrest Liu5d2361d2015-02-09 17:31:45 +08004660 /*
4661 * We do this since we'll remove the pages after we've
4662 * removed the eb from the radix tree, so we could race
4663 * and have this page now attached to the new eb. So
4664 * only clear page_private if it's still connected to
4665 * this eb.
4666 */
4667 if (PagePrivate(page) &&
4668 page->private == (unsigned long)eb) {
4669 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4670 BUG_ON(PageDirty(page));
4671 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004672 /*
Forrest Liu5d2361d2015-02-09 17:31:45 +08004673			 * We need to make sure we haven't been attached
4674 * to a new eb.
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004675 */
Forrest Liu5d2361d2015-02-09 17:31:45 +08004676 ClearPagePrivate(page);
4677 set_page_private(page, 0);
4678 /* One for the page private */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004679 put_page(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004680 }
Forrest Liu5d2361d2015-02-09 17:31:45 +08004681
4682 if (mapped)
4683 spin_unlock(&page->mapping->private_lock);
4684
Nicholas D Steeves01327612016-05-19 21:18:45 -04004685 /* One for when we allocated the page */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004686 put_page(page);
David Sterbaa50924e2014-07-31 00:51:36 +02004687 } while (index != 0);
Miao Xie897ca6e92010-10-26 20:57:29 -04004688}
4689
4690/*
4691 * Helper for releasing the extent buffer.
4692 */
4693static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4694{
David Sterbaa50924e2014-07-31 00:51:36 +02004695 btrfs_release_extent_buffer_page(eb);
Miao Xie897ca6e92010-10-26 20:57:29 -04004696 __free_extent_buffer(eb);
4697}
4698
Josef Bacikf28491e2013-12-16 13:24:27 -05004699static struct extent_buffer *
4700__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
David Sterba23d79d82014-06-15 02:55:29 +02004701 unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004702{
4703 struct extent_buffer *eb = NULL;
4704
Michal Hockod1b5c562015-08-19 14:17:40 +02004705 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004706 eb->start = start;
4707 eb->len = len;
Josef Bacikf28491e2013-12-16 13:24:27 -05004708 eb->fs_info = fs_info;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004709 eb->bflags = 0;
4710 rwlock_init(&eb->lock);
4711 atomic_set(&eb->write_locks, 0);
4712 atomic_set(&eb->read_locks, 0);
4713 atomic_set(&eb->blocking_readers, 0);
4714 atomic_set(&eb->blocking_writers, 0);
4715 atomic_set(&eb->spinning_readers, 0);
4716 atomic_set(&eb->spinning_writers, 0);
4717 eb->lock_nested = 0;
4718 init_waitqueue_head(&eb->write_lock_wq);
4719 init_waitqueue_head(&eb->read_lock_wq);
4720
4721 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4722
4723 spin_lock_init(&eb->refs_lock);
4724 atomic_set(&eb->refs, 1);
4725 atomic_set(&eb->io_pages, 0);
4726
4727 /*
4728 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4729 */
4730 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4731 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4732 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4733
4734 return eb;
4735}
4736
4737struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4738{
4739 unsigned long i;
4740 struct page *p;
4741 struct extent_buffer *new;
4742 unsigned long num_pages = num_extent_pages(src->start, src->len);
4743
David Sterba3f556f72014-06-15 03:20:26 +02004744 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004745 if (new == NULL)
4746 return NULL;
4747
4748 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004749 p = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004750 if (!p) {
4751 btrfs_release_extent_buffer(new);
4752 return NULL;
4753 }
4754 attach_extent_buffer_page(new, p);
4755 WARN_ON(PageDirty(p));
4756 SetPageUptodate(p);
4757 new->pages[i] = p;
David Sterbafba1acf2016-11-08 17:56:24 +01004758 copy_page(page_address(p), page_address(src->pages[i]));
Josef Bacikdb7f3432013-08-07 14:54:37 -04004759 }
4760
Josef Bacikdb7f3432013-08-07 14:54:37 -04004761 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4762 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4763
4764 return new;
4765}
4766
Omar Sandoval0f331222015-09-29 20:50:31 -07004767struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4768 u64 start, unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004769{
4770 struct extent_buffer *eb;
David Sterba3f556f72014-06-15 03:20:26 +02004771 unsigned long num_pages;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004772 unsigned long i;
4773
Omar Sandoval0f331222015-09-29 20:50:31 -07004774 num_pages = num_extent_pages(start, len);
David Sterba3f556f72014-06-15 03:20:26 +02004775
4776 eb = __alloc_extent_buffer(fs_info, start, len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004777 if (!eb)
4778 return NULL;
4779
4780 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004781 eb->pages[i] = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004782 if (!eb->pages[i])
4783 goto err;
4784 }
4785 set_extent_buffer_uptodate(eb);
4786 btrfs_set_header_nritems(eb, 0);
4787 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4788
4789 return eb;
4790err:
4791 for (; i > 0; i--)
4792 __free_page(eb->pages[i - 1]);
4793 __free_extent_buffer(eb);
4794 return NULL;
4795}
4796
Omar Sandoval0f331222015-09-29 20:50:31 -07004797struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04004798 u64 start)
Omar Sandoval0f331222015-09-29 20:50:31 -07004799{
Jeff Mahoneyda170662016-06-15 09:22:56 -04004800 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
Omar Sandoval0f331222015-09-29 20:50:31 -07004801}
4802
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004803static void check_buffer_tree_ref(struct extent_buffer *eb)
4804{
Chris Mason242e18c2013-01-29 17:49:37 -05004805 int refs;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004806 /* the ref bit is tricky. We have to make sure it is set
4807 * if we have the buffer dirty. Otherwise the
4808 * code to free a buffer can end up dropping a dirty
4809 * page
4810 *
4811 * Once the ref bit is set, it won't go away while the
4812 * buffer is dirty or in writeback, and it also won't
4813 * go away while we have the reference count on the
4814 * eb bumped.
4815 *
4816 * We can't just set the ref bit without bumping the
4817 * ref on the eb because free_extent_buffer might
4818 * see the ref bit and try to clear it. If this happens
4819 * free_extent_buffer might end up dropping our original
4820 * ref by mistake and freeing the page before we are able
4821 * to add one more ref.
4822 *
4823	 * So, under eb->refs_lock, add the reference only if we are the ones
4824	 * who set the TREE_REF bit; if someone beat us to it, the bit already
4825	 * carries its reference and we skip the increment.
4825 */
Chris Mason242e18c2013-01-29 17:49:37 -05004826 refs = atomic_read(&eb->refs);
4827 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4828 return;
4829
Josef Bacik594831c2012-07-20 16:11:08 -04004830 spin_lock(&eb->refs_lock);
4831 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004832 atomic_inc(&eb->refs);
Josef Bacik594831c2012-07-20 16:11:08 -04004833 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004834}
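
/*
 * Illustrative only: why the ordering above matters.  If the bit were
 * set before the extra reference was taken, a racing free_extent_buffer()
 * could observe TREE_REF, clear it and drop what it believes is the
 * tree's reference while this CPU has not yet bumped eb->refs.
 * Conceptually, the safe pattern is:
 *
 *	atomic_inc(&eb->refs);				ref first
 *	set TREE_REF under eb->refs_lock;		bit second
 *	if the bit was already set
 *		atomic_dec(&eb->refs);			drop our extra ref
 */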

static void mark_extent_buffer_accessed(struct extent_buffer *eb,
					struct page *accessed)
{
	unsigned long num_pages, i;

	check_buffer_tree_ref(eb);

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *p = eb->pages[i];

		if (p != accessed)
			mark_page_accessed(p);
	}
}

struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&fs_info->buffer_radix,
			       start >> PAGE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		/*
		 * Lock our eb's refs_lock to avoid races with
		 * free_extent_buffer().  When we get our eb it might be
		 * flagged with EXTENT_BUFFER_STALE, and another task
		 * running free_extent_buffer() might have seen that flag
		 * set, eb->refs == 2, the buffer not under IO (dirty and
		 * writeback flags not set) and still in the tree (flag
		 * EXTENT_BUFFER_TREE_REF set), and therefore be in the
		 * process of decrementing the extent buffer's reference
		 * count twice.  So here we could race and increment the
		 * eb's reference count, clear its stale flag, mark it as
		 * dirty and drop our reference before the other task
		 * finishes executing free_extent_buffer(), which would
		 * later result in an attempt to free an extent buffer
		 * that is dirty.
		 */
		if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
			spin_lock(&eb->refs_lock);
			spin_unlock(&eb->refs_lock);
		}
		mark_extent_buffer_accessed(eb, NULL);
		return eb;
	}
	rcu_read_unlock();

	return NULL;
}
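
/*
 * Illustrative only: the usual lookup pattern.  find_extent_buffer()
 * returns the eb with an extra reference on success, so the caller must
 * pair it with free_extent_buffer():
 *
 *	eb = find_extent_buffer(fs_info, bytenr);
 *	if (eb) {
 *		...read or lock the buffer...
 *		free_extent_buffer(eb);
 *	}
 */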

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start)
{
	struct extent_buffer *eb, *exists = NULL;
	int ret;

	eb = find_extent_buffer(fs_info, start);
	if (eb)
		return eb;
	eb = alloc_dummy_extent_buffer(fs_info, start);
	if (!eb)
		return NULL;
	eb->fs_info = fs_info;
again:
	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		goto free_eb;
	spin_lock(&fs_info->buffer_lock);
	ret = radix_tree_insert(&fs_info->buffer_radix,
				start >> PAGE_SHIFT, eb);
	spin_unlock(&fs_info->buffer_lock);
	radix_tree_preload_end();
	if (ret == -EEXIST) {
		exists = find_extent_buffer(fs_info, start);
		if (exists)
			goto free_eb;
		else
			goto again;
	}
	check_buffer_tree_ref(eb);
	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);

	/*
	 * We will free dummy extent buffers if they come into
	 * free_extent_buffer() with a ref count of 2, but if we are using
	 * this we want the buffers to stay in memory until we're done
	 * with them, so bump the ref count again.
	 */
	atomic_inc(&eb->refs);
	return eb;
free_eb:
	btrfs_release_extent_buffer(eb);
	return exists;
}
#endif

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start)
{
	unsigned long len = fs_info->nodesize;
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	int uptodate = 1;
	int ret;

	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
		btrfs_err(fs_info, "bad tree block start %llu", start);
		return ERR_PTR(-EINVAL);
	}

	eb = find_extent_buffer(fs_info, start);
	if (eb)
		return eb;

	eb = __alloc_extent_buffer(fs_info, start, len);
	if (!eb)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
		if (!p) {
			exists = ERR_PTR(-ENOMEM);
			goto free_eb;
		}

		spin_lock(&mapping->private_lock);
		if (PagePrivate(p)) {
			/*
			 * We could have already allocated an eb for this
			 * page and attached one, so let's see if we can
			 * get a ref on the existing eb.  If we can, we
			 * know it's good and we can just return that one;
			 * otherwise we know we can just overwrite
			 * page->private.
			 */
			exists = (struct extent_buffer *)p->private;
			if (atomic_inc_not_zero(&exists->refs)) {
				spin_unlock(&mapping->private_lock);
				unlock_page(p);
				put_page(p);
				mark_extent_buffer_accessed(exists, p);
				goto free_eb;
			}
			exists = NULL;

			/*
			 * Do this so attach doesn't complain and we need
			 * to drop the ref the old guy had.
			 */
			ClearPagePrivate(p);
			WARN_ON(PageDirty(p));
			put_page(p);
		}
		attach_extent_buffer_page(eb, p);
		spin_unlock(&mapping->private_lock);
		WARN_ON(PageDirty(p));
		eb->pages[i] = p;
		if (!PageUptodate(p))
			uptodate = 0;

		/*
		 * See below about how we avoid a nasty race with
		 * releasepage and why we unlock later.
		 */
	}
	if (uptodate)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
again:
	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		exists = ERR_PTR(ret);
		goto free_eb;
	}

	spin_lock(&fs_info->buffer_lock);
	ret = radix_tree_insert(&fs_info->buffer_radix,
				start >> PAGE_SHIFT, eb);
	spin_unlock(&fs_info->buffer_lock);
	radix_tree_preload_end();
	if (ret == -EEXIST) {
		exists = find_extent_buffer(fs_info, start);
		if (exists)
			goto free_eb;
		else
			goto again;
	}
	/* add one reference for the tree */
	check_buffer_tree_ref(eb);
	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);

	/*
	 * There is a race where releasepage may have tried to find this
	 * extent buffer in the radix tree but failed.  It will tell the
	 * VM it is safe to reclaim the page, and it will clear the page
	 * private bit.  We must make sure to set the page private bit
	 * properly after the extent buffer is in the radix tree so it
	 * doesn't get lost.
	 */
	SetPageChecked(eb->pages[0]);
	for (i = 1; i < num_pages; i++) {
		p = eb->pages[i];
		ClearPageChecked(p);
		unlock_page(p);
	}
	unlock_page(eb->pages[0]);
	return eb;

free_eb:
	WARN_ON(!atomic_dec_and_test(&eb->refs));
	for (i = 0; i < num_pages; i++) {
		if (eb->pages[i])
			unlock_page(eb->pages[i]);
	}

	btrfs_release_extent_buffer(eb);
	return exists;
}
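
/*
 * Illustrative only: the preload + insert + -EEXIST retry used above is
 * the standard insertion pattern for a radix tree that is also searched
 * locklessly.  A sketch of its shape, with error handling elided:
 *
 *	ret = radix_tree_preload(GFP_NOFS);	reserve nodes, may sleep
 *	spin_lock(&fs_info->buffer_lock);
 *	ret = radix_tree_insert(&fs_info->buffer_radix, index, eb);
 *	spin_unlock(&fs_info->buffer_lock);
 *	radix_tree_preload_end();
 *	if (ret == -EEXIST)			lost the race
 *		use the winner from find_extent_buffer(), or retry
 */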

static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	__free_extent_buffer(eb);
}

/* Expects to have eb->refs_lock already held */
static int release_extent_buffer(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->refs) == 0);
	if (atomic_dec_and_test(&eb->refs)) {
		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
			struct btrfs_fs_info *fs_info = eb->fs_info;

			spin_unlock(&eb->refs_lock);

			spin_lock(&fs_info->buffer_lock);
			radix_tree_delete(&fs_info->buffer_radix,
					  eb->start >> PAGE_SHIFT);
			spin_unlock(&fs_info->buffer_lock);
		} else {
			spin_unlock(&eb->refs_lock);
		}

		/* Should be safe to release our pages at this point */
		btrfs_release_extent_buffer_page(eb);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
		if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
			__free_extent_buffer(eb);
			return 1;
		}
#endif
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
		return 1;
	}
	spin_unlock(&eb->refs_lock);

	return 0;
}

void free_extent_buffer(struct extent_buffer *eb)
{
	int refs;
	int old;
	if (!eb)
		return;

	while (1) {
		refs = atomic_read(&eb->refs);
		if (refs <= 3)
			break;
		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
		if (old == refs)
			return;
	}

	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
		atomic_dec(&eb->refs);

	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
	    !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);

	/*
	 * I know this is terrible, but it's temporary until we stop
	 * tracking the uptodate bits and such for the extent buffers.
	 */
	release_extent_buffer(eb);
}
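
/*
 * Illustrative only: why the cmpxchg loop above is safe.  While
 * eb->refs is well above the interesting thresholds (> 3), dropping a
 * reference cannot trigger a free or a flag transition, so the lockless
 * decrement avoids taking refs_lock entirely.  Only at low ref counts
 * (dummy buffers at 2, stale unreferenced buffers at 2) do we fall
 * through to the locked path, which may drop the final tree reference
 * and hand the buffer to release_extent_buffer().
 */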

void free_extent_buffer_stale(struct extent_buffer *eb)
{
	if (!eb)
		return;

	spin_lock(&eb->refs_lock);
	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);

	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);
	release_extent_buffer(eb);
}

void clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		if (!PageDirty(page))
			continue;

		lock_page(page);
		WARN_ON(!PagePrivate(page));

		clear_page_dirty_for_io(page);
		spin_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&page->mapping->tree_lock);
		ClearPageError(page);
		unlock_page(page);
	}
	WARN_ON(atomic_read(&eb->refs) == 0);
}

int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	int was_dirty = 0;

	check_buffer_tree_ref(eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);

	num_pages = num_extent_pages(eb->start, eb->len);
	WARN_ON(atomic_read(&eb->refs) == 0);
	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));

	for (i = 0; i < num_pages; i++)
		set_page_dirty(eb->pages[i]);
	return was_dirty;
}

void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		if (page)
			ClearPageUptodate(page);
	}
}

void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		SetPageUptodate(page);
	}
}

int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait, int mirror_num)
{
	unsigned long i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	unsigned long num_pages;
	unsigned long num_reads = 0;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		if (wait == WAIT_NONE) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
	}
	/*
	 * We need to first lock all pages to make sure that
	 * the uptodate bit of our pages won't be affected by
	 * clear_extent_buffer_uptodate().
	 */
	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		if (!PageUptodate(page)) {
			num_reads++;
			all_uptodate = 0;
		}
	}

	if (all_uptodate) {
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		goto unlock_exit;
	}

	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = 0;
	atomic_set(&eb->io_pages, num_reads);
	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];

		if (!PageUptodate(page)) {
			if (ret) {
				atomic_dec(&eb->io_pages);
				unlock_page(page);
				continue;
			}

			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      btree_get_extent, &bio,
						      mirror_num, &bio_flags,
						      REQ_META);
			if (err) {
				ret = err;
				/*
				 * We use &bio in the above
				 * __extent_read_full_page call, so if it
				 * returns an error the current page failed
				 * to add itself to the bio and has already
				 * been unlocked.
				 *
				 * We must decrement io_pages ourselves.
				 */
				atomic_dec(&eb->io_pages);
			}
		} else {
			unlock_page(page);
		}
	}

	if (bio) {
		err = submit_one_bio(bio, mirror_num, bio_flags);
		if (err)
			return err;
	}

	if (ret || wait != WAIT_COMPLETE)
		return ret;

	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}

	return ret;

unlock_exit:
	while (locked_pages > 0) {
		locked_pages--;
		page = eb->pages[locked_pages];
		unlock_page(page);
	}
	return ret;
}
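
/*
 * Illustrative only: a typical caller reads a tree block and then checks
 * the result.  With WAIT_COMPLETE the function returns only once all
 * page reads have finished:
 *
 *	ret = read_extent_buffer_pages(tree, eb, WAIT_COMPLETE, mirror_num);
 *	if (ret < 0)
 *		handle the I/O error;
 *
 * With WAIT_NONE the read is merely kicked off, and the caller must not
 * assume the buffer is uptodate on return.
 */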

void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
			unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;

	if (start + len > eb->len) {
		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
		     eb->start, eb->len, start, len);
		memset(dst, 0, len);
		return;
	}

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];

		cur = min(len, (PAGE_SIZE - offset));
		kaddr = page_address(page);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
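
/*
 * Illustrative only: a worked example of the index arithmetic above,
 * assuming 4K pages and a page-aligned eb at start 0x11000
 * (start_offset = 0x11000 & 0xfff = 0).  A read at offset 5000 begins
 * in page i = 5000 >> 12 = 1, at in-page offset 5000 & 0xfff = 904; the
 * copy loop then advances page by page with offset reset to 0.
 */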

int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dstv,
			       unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char __user *dst = (char __user *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];

		cur = min(len, (PAGE_SIZE - offset));
		kaddr = page_address(page);
		if (copy_to_user(dst, kaddr + offset, cur)) {
			ret = -EFAULT;
			break;
		}

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}

	return ret;
}

/*
 * return 0 if the item is found within a page.
 * return 1 if the item spans two pages.
 * return -EINVAL otherwise.
 */
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long start, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len)
{
	size_t offset = start & (PAGE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_SHIFT;

	if (start + min_len > eb->len) {
		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
		     eb->start, eb->len, start, min_len);
		return -EINVAL;
	}

	if (i != end_i)
		return 1;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
	}

	p = eb->pages[i];
	kaddr = page_address(p);
	*map = kaddr + offset;
	*map_len = PAGE_SIZE - offset;
	return 0;
}
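
/*
 * Illustrative only: callers use map_private_extent_buffer() as a fast
 * path and fall back to the copying helpers when the item straddles a
 * page boundary:
 *
 *	err = map_private_extent_buffer(eb, offset, size,
 *					&kaddr, &map_start, &map_len);
 *	if (err == 0)
 *		access the item directly through kaddr;
 *	else if (err == 1)
 *		use read_extent_buffer()/write_extent_buffer() instead;
 */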

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];

		cur = min(len, (PAGE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *srcv)
{
	char *kaddr;

	WARN_ON(!PageUptodate(eb->pages[0]));
	kaddr = page_address(eb->pages[0]);
	memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
			BTRFS_FSID_SIZE);
}

void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
{
	char *kaddr;

	WARN_ON(!PageUptodate(eb->pages[0]));
	kaddr = page_address(eb->pages[0]);
	memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
			BTRFS_FSID_SIZE);
}

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
		unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, 0, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src)
{
	int i;
	unsigned num_pages;

	ASSERT(dst->len == src->len);

	num_pages = num_extent_pages(dst->start, dst->len);
	for (i = 0; i < num_pages; i++)
		copy_page(page_address(dst->pages[i]),
				page_address(src->pages[i]));
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		(PAGE_SIZE - 1);

	while (len > 0) {
		page = dst->pages[i];
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void le_bitmap_set(u8 *map, unsigned int start, int len)
{
	u8 *p = map + BIT_BYTE(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

void le_bitmap_clear(u8 *map, unsigned int start, int len)
{
	u8 *p = map + BIT_BYTE(start);
	const unsigned int size = start + len;
	int bits_to_clear = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(start);

	while (len - bits_to_clear >= 0) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		p++;
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		*p &= ~mask_to_clear;
	}
}
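
/*
 * Illustrative only: a worked example of le_bitmap_set().  Setting 10
 * bits starting at bit 4 in a zeroed little-endian bitmap touches two
 * bytes:
 *
 *	u8 map[2] = { 0, 0 };
 *	le_bitmap_set(map, 4, 10);
 *	map[0] == 0xf0		bits 4..7 of byte 0
 *	map[1] == 0x3f		bits 0..5 of byte 1
 *
 * BITMAP_FIRST_BYTE_MASK(4) keeps only the high nibble of the first
 * byte; BITMAP_LAST_BYTE_MASK(14) trims the final partial byte.
 */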

/*
 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
 * given bit number
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number
 * @page_index: return index of the page in the extent buffer that contains the
 * given bit number
 * @page_offset: return offset into the page given by page_index
 *
 * This helper hides the ugliness of finding the byte in an extent buffer which
 * contains a given bit.
 */
static inline void eb_bitmap_offset(struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *page_index,
				    size_t *page_offset)
{
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in the
	 * bitmap item.
	 */
	offset = start_offset + start + byte_offset;

	*page_index = offset >> PAGE_SHIFT;
	*page_offset = offset & (PAGE_SIZE - 1);
}
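
/*
 * Illustrative only: the arithmetic above, assuming a 64K PAGE_SIZE so
 * that an eb can start mid-page.  For an eb at start 0x11000
 * (start_offset = 0x11000 & 0xffff = 0x1000), a bitmap item at
 * start = 0x200 and bit nr = 200: byte_offset = 200 / 8 = 25, so
 * offset = 0x1000 + 0x200 + 25 = 0x1219, giving page_index =
 * 0x1219 >> 16 = 0 and page_offset = 0x1219.
 */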

/**
 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number to test
 */
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}

/**
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);

	while (len >= bits_to_set) {
		kaddr[offset] |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			WARN_ON(!PageUptodate(page));
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] |= mask_to_set;
	}
}


/**
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);

	while (len >= bits_to_clear) {
		kaddr[offset] &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			WARN_ON(!PageUptodate(page));
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] &= ~mask_to_clear;
	}
}

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}
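
/*
 * Illustrative only: areas_overlap() returns true exactly when the two
 * byte ranges [src, src + len) and [dst, dst + len) intersect.  E.g.
 * src = 100, dst = 110, len = 20 gives distance 10 < 20 -> overlap, so
 * copy_pages() below must use memmove(); with len = 10 the ranges only
 * touch (distance 10, not < 10) and memcpy() is safe.
 */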

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;
	int must_memmove = 0;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		if (areas_overlap(src_off, dst_off, len))
			must_memmove = 1;
	}

	if (must_memmove)
		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
	else
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		btrfs_err(fs_info,
			  "memcpy bogus src_offset %lu move len %lu dst len %lu",
			  src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		btrfs_err(fs_info,
			  "memcpy bogus dst_offset %lu move len %lu dst len %lu",
			  dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			(PAGE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			(PAGE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_SIZE - dst_off_in_page));

		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		btrfs_err(fs_info,
			  "memmove bogus src_offset %lu move len %lu len %lu",
			  src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		btrfs_err(fs_info,
			  "memmove bogus dst_offset %lu move len %lu len %lu",
			  dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			(PAGE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			(PAGE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

int try_release_extent_buffer(struct page *page)
{
	struct extent_buffer *eb;

	/*
	 * We need to make sure nobody is attaching this page to an eb right
	 * now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	/*
	 * If the tree ref isn't set then we know the ref on this eb is a
	 * real ref, so just return, this page will likely be freed soon
	 * anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}