blob: 932d2e0be8d7f3d4ae99e66fe1a5a1498494ca89 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
David Sterbac1d7c512018-04-03 19:23:33 +02002
Chris Masond1310b22008-01-24 16:13:08 -05003#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
Chris Masond1310b22008-01-24 16:13:08 -05007#include <linux/pagemap.h>
8#include <linux/page-flags.h>
Chris Masond1310b22008-01-24 16:13:08 -05009#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
Chris Masond1310b22008-01-24 16:13:08 -050012#include <linux/writeback.h>
13#include <linux/pagevec.h>
Linus Torvalds268bb0c2011-05-20 12:50:29 -070014#include <linux/prefetch.h>
Dan Magenheimer90a887c2011-05-26 10:01:56 -060015#include <linux/cleancache.h>
Chris Masond1310b22008-01-24 16:13:08 -050016#include "extent_io.h"
17#include "extent_map.h"
David Woodhouse902b22f2008-08-20 08:51:49 -040018#include "ctree.h"
19#include "btrfs_inode.h"
Jan Schmidt4a54c8c2011-07-22 15:41:52 +020020#include "volumes.h"
Stefan Behrens21adbd52011-11-09 13:44:05 +010021#include "check-integrity.h"
Josef Bacik0b32f4b2012-03-13 09:38:00 -040022#include "locking.h"
Josef Bacik606686e2012-06-04 14:03:51 -040023#include "rcu-string.h"
Liu Bofe09e162013-09-22 12:54:23 +080024#include "backref.h"
David Sterba6af49db2017-06-23 04:09:57 +020025#include "disk-io.h"
Chris Masond1310b22008-01-24 16:13:08 -050026
Chris Masond1310b22008-01-24 16:13:08 -050027static struct kmem_cache *extent_state_cache;
28static struct kmem_cache *extent_buffer_cache;
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -040029static struct bio_set btrfs_bioset;
Chris Masond1310b22008-01-24 16:13:08 -050030
Filipe Manana27a35072014-07-06 20:09:59 +010031static inline bool extent_state_in_tree(const struct extent_state *state)
32{
33 return !RB_EMPTY_NODE(&state->rb_node);
34}
35
Eric Sandeen6d49ba12013-04-22 16:12:31 +000036#ifdef CONFIG_BTRFS_DEBUG
Chris Masond1310b22008-01-24 16:13:08 -050037static LIST_HEAD(buffers);
38static LIST_HEAD(states);
Chris Mason4bef0842008-09-08 11:18:08 -040039
Chris Masond3977122009-01-05 21:25:51 -050040static DEFINE_SPINLOCK(leak_lock);
Eric Sandeen6d49ba12013-04-22 16:12:31 +000041
42static inline
43void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
44{
45 unsigned long flags;
46
47 spin_lock_irqsave(&leak_lock, flags);
48 list_add(new, head);
49 spin_unlock_irqrestore(&leak_lock, flags);
50}
51
52static inline
53void btrfs_leak_debug_del(struct list_head *entry)
54{
55 unsigned long flags;
56
57 spin_lock_irqsave(&leak_lock, flags);
58 list_del(entry);
59 spin_unlock_irqrestore(&leak_lock, flags);
60}
61
62static inline
63void btrfs_leak_debug_check(void)
64{
65 struct extent_state *state;
66 struct extent_buffer *eb;
67
68 while (!list_empty(&states)) {
69 state = list_entry(states.next, struct extent_state, leak_list);
David Sterba9ee49a042015-01-14 19:52:13 +010070 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
Filipe Manana27a35072014-07-06 20:09:59 +010071 state->start, state->end, state->state,
72 extent_state_in_tree(state),
Elena Reshetovab7ac31b2017-03-03 10:55:19 +020073 refcount_read(&state->refs));
Eric Sandeen6d49ba12013-04-22 16:12:31 +000074 list_del(&state->leak_list);
75 kmem_cache_free(extent_state_cache, state);
76 }
77
78 while (!list_empty(&buffers)) {
79 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
Liu Boaf2679e2018-01-25 11:02:48 -070080 pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
81 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
Eric Sandeen6d49ba12013-04-22 16:12:31 +000082 list_del(&eb->leak_list);
83 kmem_cache_free(extent_buffer_cache, eb);
84 }
85}
David Sterba8d599ae2013-04-30 15:22:23 +000086
Josef Bacika5dee372013-12-13 10:02:44 -050087#define btrfs_debug_check_extent_io_range(tree, start, end) \
88 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
David Sterba8d599ae2013-04-30 15:22:23 +000089static inline void __btrfs_debug_check_extent_io_range(const char *caller,
Josef Bacika5dee372013-12-13 10:02:44 -050090 struct extent_io_tree *tree, u64 start, u64 end)
David Sterba8d599ae2013-04-30 15:22:23 +000091{
Nikolay Borisov65a680f2018-11-01 14:09:49 +020092 struct inode *inode = tree->private_data;
93 u64 isize;
94
95 if (!inode || !is_data_inode(inode))
96 return;
97
98 isize = i_size_read(inode);
99 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
100 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
101 "%s: ino %llu isize %llu odd range [%llu,%llu]",
102 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
103 }
David Sterba8d599ae2013-04-30 15:22:23 +0000104}
Eric Sandeen6d49ba12013-04-22 16:12:31 +0000105#else
106#define btrfs_leak_debug_add(new, head) do {} while (0)
107#define btrfs_leak_debug_del(entry) do {} while (0)
108#define btrfs_leak_debug_check() do {} while (0)
David Sterba8d599ae2013-04-30 15:22:23 +0000109#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
Chris Mason4bef0842008-09-08 11:18:08 -0400110#endif
Chris Masond1310b22008-01-24 16:13:08 -0500111
Chris Masond1310b22008-01-24 16:13:08 -0500112struct tree_entry {
113 u64 start;
114 u64 end;
Chris Masond1310b22008-01-24 16:13:08 -0500115 struct rb_node rb_node;
116};
117
118struct extent_page_data {
119 struct bio *bio;
120 struct extent_io_tree *tree;
Chris Mason771ed682008-11-06 22:02:51 -0500121 /* tells writepage not to lock the state bits for this range
122 * it still does the unlocking
123 */
Chris Masonffbd5172009-04-20 15:50:09 -0400124 unsigned int extent_locked:1;
125
Christoph Hellwig70fd7612016-11-01 07:40:10 -0600126 /* tells the submit_bio code to use REQ_SYNC */
Chris Masonffbd5172009-04-20 15:50:09 -0400127 unsigned int sync_io:1;
Chris Masond1310b22008-01-24 16:13:08 -0500128};
129
David Sterba57599c72018-03-01 17:56:34 +0100130static int add_extent_changeset(struct extent_state *state, unsigned bits,
Qu Wenruod38ed272015-10-12 14:53:37 +0800131 struct extent_changeset *changeset,
132 int set)
133{
134 int ret;
135
136 if (!changeset)
David Sterba57599c72018-03-01 17:56:34 +0100137 return 0;
Qu Wenruod38ed272015-10-12 14:53:37 +0800138 if (set && (state->state & bits) == bits)
David Sterba57599c72018-03-01 17:56:34 +0100139 return 0;
Qu Wenruofefdc552015-10-12 15:35:38 +0800140 if (!set && (state->state & bits) == 0)
David Sterba57599c72018-03-01 17:56:34 +0100141 return 0;
Qu Wenruod38ed272015-10-12 14:53:37 +0800142 changeset->bytes_changed += state->end - state->start + 1;
David Sterba53d32352017-02-13 13:42:29 +0100143 ret = ulist_add(&changeset->range_changed, state->start, state->end,
Qu Wenruod38ed272015-10-12 14:53:37 +0800144 GFP_ATOMIC);
David Sterba57599c72018-03-01 17:56:34 +0100145 return ret;
Qu Wenruod38ed272015-10-12 14:53:37 +0800146}
147
Qu Wenruobb58eb92019-01-25 13:09:15 +0800148static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
149 unsigned long bio_flags)
150{
151 blk_status_t ret = 0;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800152 struct extent_io_tree *tree = bio->bi_private;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800153
154 bio->bi_private = NULL;
155
156 if (tree->ops)
157 ret = tree->ops->submit_bio_hook(tree->private_data, bio,
Nikolay Borisov50489a52019-04-10 19:46:04 +0300158 mirror_num, bio_flags);
Qu Wenruobb58eb92019-01-25 13:09:15 +0800159 else
160 btrfsic_submit_bio(bio);
161
162 return blk_status_to_errno(ret);
163}
164
Qu Wenruo30659762019-03-20 14:27:42 +0800165/* Cleanup unsubmitted bios */
166static void end_write_bio(struct extent_page_data *epd, int ret)
167{
168 if (epd->bio) {
169 epd->bio->bi_status = errno_to_blk_status(ret);
170 bio_endio(epd->bio);
171 epd->bio = NULL;
172 }
173}
174
Qu Wenruof4340622019-03-20 14:27:41 +0800175/*
176 * Submit bio from extent page data via submit_one_bio
177 *
178 * Return 0 if everything is OK.
179 * Return <0 for error.
180 */
181static int __must_check flush_write_bio(struct extent_page_data *epd)
Qu Wenruobb58eb92019-01-25 13:09:15 +0800182{
Qu Wenruof4340622019-03-20 14:27:41 +0800183 int ret = 0;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800184
Qu Wenruof4340622019-03-20 14:27:41 +0800185 if (epd->bio) {
Qu Wenruobb58eb92019-01-25 13:09:15 +0800186 ret = submit_one_bio(epd->bio, 0, 0);
Qu Wenruof4340622019-03-20 14:27:41 +0800187 /*
188 * Clean up of epd->bio is handled by its endio function.
189 * And endio is either triggered by successful bio execution
190 * or the error handler of submit bio hook.
191 * So at this point, no matter what happened, we don't need
192 * to clean up epd->bio.
193 */
Qu Wenruobb58eb92019-01-25 13:09:15 +0800194 epd->bio = NULL;
195 }
Qu Wenruof4340622019-03-20 14:27:41 +0800196 return ret;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800197}
David Sterbae2932ee2017-06-23 04:16:17 +0200198
Chris Masond1310b22008-01-24 16:13:08 -0500199int __init extent_io_init(void)
200{
David Sterba837e1972012-09-07 03:00:48 -0600201 extent_state_cache = kmem_cache_create("btrfs_extent_state",
Christoph Hellwig9601e3f2009-04-13 15:33:09 +0200202 sizeof(struct extent_state), 0,
Nikolay Borisovfba4b692016-06-23 21:17:08 +0300203 SLAB_MEM_SPREAD, NULL);
Chris Masond1310b22008-01-24 16:13:08 -0500204 if (!extent_state_cache)
205 return -ENOMEM;
206
David Sterba837e1972012-09-07 03:00:48 -0600207 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
Christoph Hellwig9601e3f2009-04-13 15:33:09 +0200208 sizeof(struct extent_buffer), 0,
Nikolay Borisovfba4b692016-06-23 21:17:08 +0300209 SLAB_MEM_SPREAD, NULL);
Chris Masond1310b22008-01-24 16:13:08 -0500210 if (!extent_buffer_cache)
211 goto free_state_cache;
Chris Mason9be33952013-05-17 18:30:14 -0400212
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400213 if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
214 offsetof(struct btrfs_io_bio, bio),
215 BIOSET_NEED_BVECS))
Chris Mason9be33952013-05-17 18:30:14 -0400216 goto free_buffer_cache;
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700217
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400218 if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700219 goto free_bioset;
220
Chris Masond1310b22008-01-24 16:13:08 -0500221 return 0;
222
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700223free_bioset:
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400224 bioset_exit(&btrfs_bioset);
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700225
Chris Mason9be33952013-05-17 18:30:14 -0400226free_buffer_cache:
227 kmem_cache_destroy(extent_buffer_cache);
228 extent_buffer_cache = NULL;
229
Chris Masond1310b22008-01-24 16:13:08 -0500230free_state_cache:
231 kmem_cache_destroy(extent_state_cache);
Chris Mason9be33952013-05-17 18:30:14 -0400232 extent_state_cache = NULL;
Chris Masond1310b22008-01-24 16:13:08 -0500233 return -ENOMEM;
234}
235
David Sterbae67c7182018-02-19 17:24:18 +0100236void __cold extent_io_exit(void)
Chris Masond1310b22008-01-24 16:13:08 -0500237{
Eric Sandeen6d49ba12013-04-22 16:12:31 +0000238 btrfs_leak_debug_check();
Kirill A. Shutemov8c0a8532012-09-26 11:33:07 +1000239
240 /*
241 * Make sure all delayed rcu free are flushed before we
242 * destroy caches.
243 */
244 rcu_barrier();
Kinglong Mee5598e902016-01-29 21:36:35 +0800245 kmem_cache_destroy(extent_state_cache);
246 kmem_cache_destroy(extent_buffer_cache);
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400247 bioset_exit(&btrfs_bioset);
Chris Masond1310b22008-01-24 16:13:08 -0500248}
249
Qu Wenruoc258d6e2019-03-01 10:47:58 +0800250void extent_io_tree_init(struct btrfs_fs_info *fs_info,
Qu Wenruo43eb5f22019-03-01 10:47:59 +0800251 struct extent_io_tree *tree, unsigned int owner,
252 void *private_data)
Chris Masond1310b22008-01-24 16:13:08 -0500253{
Qu Wenruoc258d6e2019-03-01 10:47:58 +0800254 tree->fs_info = fs_info;
Eric Paris6bef4d32010-02-23 19:43:04 +0000255 tree->state = RB_ROOT;
Chris Masond1310b22008-01-24 16:13:08 -0500256 tree->ops = NULL;
257 tree->dirty_bytes = 0;
Chris Mason70dec802008-01-29 09:59:12 -0500258 spin_lock_init(&tree->lock);
Josef Bacikc6100a42017-05-05 11:57:13 -0400259 tree->private_data = private_data;
Qu Wenruo43eb5f22019-03-01 10:47:59 +0800260 tree->owner = owner;
Chris Masond1310b22008-01-24 16:13:08 -0500261}
Chris Masond1310b22008-01-24 16:13:08 -0500262
Nikolay Borisov41e7acd2019-03-25 14:31:24 +0200263void extent_io_tree_release(struct extent_io_tree *tree)
264{
265 spin_lock(&tree->lock);
266 /*
267 * Do a single barrier for the waitqueue_active check here, the state
268 * of the waitqueue should not change once extent_io_tree_release is
269 * called.
270 */
271 smp_mb();
272 while (!RB_EMPTY_ROOT(&tree->state)) {
273 struct rb_node *node;
274 struct extent_state *state;
275
276 node = rb_first(&tree->state);
277 state = rb_entry(node, struct extent_state, rb_node);
278 rb_erase(&state->rb_node, &tree->state);
279 RB_CLEAR_NODE(&state->rb_node);
280 /*
281 * btree io trees aren't supposed to have tasks waiting for
282 * changes in the flags of extent states ever.
283 */
284 ASSERT(!waitqueue_active(&state->wq));
285 free_extent_state(state);
286
287 cond_resched_lock(&tree->lock);
288 }
289 spin_unlock(&tree->lock);
290}
291
Christoph Hellwigb2950862008-12-02 09:54:17 -0500292static struct extent_state *alloc_extent_state(gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -0500293{
294 struct extent_state *state;
Chris Masond1310b22008-01-24 16:13:08 -0500295
Michal Hocko3ba7ab22017-01-09 15:39:02 +0100296 /*
297 * The given mask might be not appropriate for the slab allocator,
298 * drop the unsupported bits
299 */
300 mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
Chris Masond1310b22008-01-24 16:13:08 -0500301 state = kmem_cache_alloc(extent_state_cache, mask);
Peter2b114d12008-04-01 11:21:40 -0400302 if (!state)
Chris Masond1310b22008-01-24 16:13:08 -0500303 return state;
304 state->state = 0;
David Sterba47dc1962016-02-11 13:24:13 +0100305 state->failrec = NULL;
Filipe Manana27a35072014-07-06 20:09:59 +0100306 RB_CLEAR_NODE(&state->rb_node);
Eric Sandeen6d49ba12013-04-22 16:12:31 +0000307 btrfs_leak_debug_add(&state->leak_list, &states);
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200308 refcount_set(&state->refs, 1);
Chris Masond1310b22008-01-24 16:13:08 -0500309 init_waitqueue_head(&state->wq);
Jeff Mahoney143bede2012-03-01 14:56:26 +0100310 trace_alloc_extent_state(state, mask, _RET_IP_);
Chris Masond1310b22008-01-24 16:13:08 -0500311 return state;
312}
Chris Masond1310b22008-01-24 16:13:08 -0500313
Chris Mason4845e442010-05-25 20:56:50 -0400314void free_extent_state(struct extent_state *state)
Chris Masond1310b22008-01-24 16:13:08 -0500315{
Chris Masond1310b22008-01-24 16:13:08 -0500316 if (!state)
317 return;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200318 if (refcount_dec_and_test(&state->refs)) {
Filipe Manana27a35072014-07-06 20:09:59 +0100319 WARN_ON(extent_state_in_tree(state));
Eric Sandeen6d49ba12013-04-22 16:12:31 +0000320 btrfs_leak_debug_del(&state->leak_list);
Jeff Mahoney143bede2012-03-01 14:56:26 +0100321 trace_free_extent_state(state, _RET_IP_);
Chris Masond1310b22008-01-24 16:13:08 -0500322 kmem_cache_free(extent_state_cache, state);
323 }
324}
Chris Masond1310b22008-01-24 16:13:08 -0500325
Filipe Mananaf2071b22014-02-12 15:05:53 +0000326static struct rb_node *tree_insert(struct rb_root *root,
327 struct rb_node *search_start,
328 u64 offset,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000329 struct rb_node *node,
330 struct rb_node ***p_in,
331 struct rb_node **parent_in)
Chris Masond1310b22008-01-24 16:13:08 -0500332{
Filipe Mananaf2071b22014-02-12 15:05:53 +0000333 struct rb_node **p;
Chris Masond3977122009-01-05 21:25:51 -0500334 struct rb_node *parent = NULL;
Chris Masond1310b22008-01-24 16:13:08 -0500335 struct tree_entry *entry;
336
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000337 if (p_in && parent_in) {
338 p = *p_in;
339 parent = *parent_in;
340 goto do_insert;
341 }
342
Filipe Mananaf2071b22014-02-12 15:05:53 +0000343 p = search_start ? &search_start : &root->rb_node;
Chris Masond3977122009-01-05 21:25:51 -0500344 while (*p) {
Chris Masond1310b22008-01-24 16:13:08 -0500345 parent = *p;
346 entry = rb_entry(parent, struct tree_entry, rb_node);
347
348 if (offset < entry->start)
349 p = &(*p)->rb_left;
350 else if (offset > entry->end)
351 p = &(*p)->rb_right;
352 else
353 return parent;
354 }
355
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000356do_insert:
Chris Masond1310b22008-01-24 16:13:08 -0500357 rb_link_node(node, parent, p);
358 rb_insert_color(node, root);
359 return NULL;
360}
361
Nikolay Borisov8666e632019-06-05 14:50:04 +0300362/**
363 * __etree_search - searche @tree for an entry that contains @offset. Such
364 * entry would have entry->start <= offset && entry->end >= offset.
365 *
366 * @tree - the tree to search
367 * @offset - offset that should fall within an entry in @tree
368 * @next_ret - pointer to the first entry whose range ends after @offset
369 * @prev - pointer to the first entry whose range begins before @offset
370 * @p_ret - pointer where new node should be anchored (used when inserting an
371 * entry in the tree)
372 * @parent_ret - points to entry which would have been the parent of the entry,
373 * containing @offset
374 *
375 * This function returns a pointer to the entry that contains @offset byte
376 * address. If no such entry exists, then NULL is returned and the other
377 * pointer arguments to the function are filled, otherwise the found entry is
378 * returned and other pointers are left untouched.
379 */
Chris Mason80ea96b2008-02-01 14:51:59 -0500380static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000381 struct rb_node **next_ret,
Nikolay Borisov352646c2019-01-30 16:51:00 +0200382 struct rb_node **prev_ret,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000383 struct rb_node ***p_ret,
384 struct rb_node **parent_ret)
Chris Masond1310b22008-01-24 16:13:08 -0500385{
Chris Mason80ea96b2008-02-01 14:51:59 -0500386 struct rb_root *root = &tree->state;
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000387 struct rb_node **n = &root->rb_node;
Chris Masond1310b22008-01-24 16:13:08 -0500388 struct rb_node *prev = NULL;
389 struct rb_node *orig_prev = NULL;
390 struct tree_entry *entry;
391 struct tree_entry *prev_entry = NULL;
392
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000393 while (*n) {
394 prev = *n;
395 entry = rb_entry(prev, struct tree_entry, rb_node);
Chris Masond1310b22008-01-24 16:13:08 -0500396 prev_entry = entry;
397
398 if (offset < entry->start)
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000399 n = &(*n)->rb_left;
Chris Masond1310b22008-01-24 16:13:08 -0500400 else if (offset > entry->end)
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000401 n = &(*n)->rb_right;
Chris Masond3977122009-01-05 21:25:51 -0500402 else
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000403 return *n;
Chris Masond1310b22008-01-24 16:13:08 -0500404 }
405
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000406 if (p_ret)
407 *p_ret = n;
408 if (parent_ret)
409 *parent_ret = prev;
410
Nikolay Borisov352646c2019-01-30 16:51:00 +0200411 if (next_ret) {
Chris Masond1310b22008-01-24 16:13:08 -0500412 orig_prev = prev;
Chris Masond3977122009-01-05 21:25:51 -0500413 while (prev && offset > prev_entry->end) {
Chris Masond1310b22008-01-24 16:13:08 -0500414 prev = rb_next(prev);
415 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
416 }
Nikolay Borisov352646c2019-01-30 16:51:00 +0200417 *next_ret = prev;
Chris Masond1310b22008-01-24 16:13:08 -0500418 prev = orig_prev;
419 }
420
Nikolay Borisov352646c2019-01-30 16:51:00 +0200421 if (prev_ret) {
Chris Masond1310b22008-01-24 16:13:08 -0500422 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
Chris Masond3977122009-01-05 21:25:51 -0500423 while (prev && offset < prev_entry->start) {
Chris Masond1310b22008-01-24 16:13:08 -0500424 prev = rb_prev(prev);
425 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
426 }
Nikolay Borisov352646c2019-01-30 16:51:00 +0200427 *prev_ret = prev;
Chris Masond1310b22008-01-24 16:13:08 -0500428 }
429 return NULL;
430}
431
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000432static inline struct rb_node *
433tree_search_for_insert(struct extent_io_tree *tree,
434 u64 offset,
435 struct rb_node ***p_ret,
436 struct rb_node **parent_ret)
Chris Masond1310b22008-01-24 16:13:08 -0500437{
Nikolay Borisov352646c2019-01-30 16:51:00 +0200438 struct rb_node *next= NULL;
Chris Masond1310b22008-01-24 16:13:08 -0500439 struct rb_node *ret;
Chris Mason70dec802008-01-29 09:59:12 -0500440
Nikolay Borisov352646c2019-01-30 16:51:00 +0200441 ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
Chris Masond3977122009-01-05 21:25:51 -0500442 if (!ret)
Nikolay Borisov352646c2019-01-30 16:51:00 +0200443 return next;
Chris Masond1310b22008-01-24 16:13:08 -0500444 return ret;
445}
446
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000447static inline struct rb_node *tree_search(struct extent_io_tree *tree,
448 u64 offset)
449{
450 return tree_search_for_insert(tree, offset, NULL, NULL);
451}
452
Chris Masond1310b22008-01-24 16:13:08 -0500453/*
454 * utility function to look for merge candidates inside a given range.
455 * Any extents with matching state are merged together into a single
456 * extent in the tree. Extents with EXTENT_IO in their state field
457 * are not merged because the end_io handlers need to be able to do
458 * operations on them without sleeping (or doing allocations/splits).
459 *
460 * This should be called with the tree lock held.
461 */
Jeff Mahoney1bf85042011-07-21 16:56:09 +0000462static void merge_state(struct extent_io_tree *tree,
463 struct extent_state *state)
Chris Masond1310b22008-01-24 16:13:08 -0500464{
465 struct extent_state *other;
466 struct rb_node *other_node;
467
Nikolay Borisov88826792019-03-14 15:28:31 +0200468 if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
Jeff Mahoney1bf85042011-07-21 16:56:09 +0000469 return;
Chris Masond1310b22008-01-24 16:13:08 -0500470
471 other_node = rb_prev(&state->rb_node);
472 if (other_node) {
473 other = rb_entry(other_node, struct extent_state, rb_node);
474 if (other->end == state->start - 1 &&
475 other->state == state->state) {
Nikolay Borisov5c848192018-11-01 14:09:52 +0200476 if (tree->private_data &&
477 is_data_inode(tree->private_data))
478 btrfs_merge_delalloc_extent(tree->private_data,
479 state, other);
Chris Masond1310b22008-01-24 16:13:08 -0500480 state->start = other->start;
Chris Masond1310b22008-01-24 16:13:08 -0500481 rb_erase(&other->rb_node, &tree->state);
Filipe Manana27a35072014-07-06 20:09:59 +0100482 RB_CLEAR_NODE(&other->rb_node);
Chris Masond1310b22008-01-24 16:13:08 -0500483 free_extent_state(other);
484 }
485 }
486 other_node = rb_next(&state->rb_node);
487 if (other_node) {
488 other = rb_entry(other_node, struct extent_state, rb_node);
489 if (other->start == state->end + 1 &&
490 other->state == state->state) {
Nikolay Borisov5c848192018-11-01 14:09:52 +0200491 if (tree->private_data &&
492 is_data_inode(tree->private_data))
493 btrfs_merge_delalloc_extent(tree->private_data,
494 state, other);
Josef Bacikdf98b6e2011-06-20 14:53:48 -0400495 state->end = other->end;
Josef Bacikdf98b6e2011-06-20 14:53:48 -0400496 rb_erase(&other->rb_node, &tree->state);
Filipe Manana27a35072014-07-06 20:09:59 +0100497 RB_CLEAR_NODE(&other->rb_node);
Josef Bacikdf98b6e2011-06-20 14:53:48 -0400498 free_extent_state(other);
Chris Masond1310b22008-01-24 16:13:08 -0500499 }
500 }
Chris Masond1310b22008-01-24 16:13:08 -0500501}
502
Xiao Guangrong3150b692011-07-14 03:19:08 +0000503static void set_state_bits(struct extent_io_tree *tree,
Qu Wenruod38ed272015-10-12 14:53:37 +0800504 struct extent_state *state, unsigned *bits,
505 struct extent_changeset *changeset);
Xiao Guangrong3150b692011-07-14 03:19:08 +0000506
Chris Masond1310b22008-01-24 16:13:08 -0500507/*
508 * insert an extent_state struct into the tree. 'bits' are set on the
509 * struct before it is inserted.
510 *
511 * This may return -EEXIST if the extent is already there, in which case the
512 * state struct is freed.
513 *
514 * The tree lock is not taken internally. This is a utility function and
515 * probably isn't what you want to call (see set/clear_extent_bit).
516 */
517static int insert_state(struct extent_io_tree *tree,
518 struct extent_state *state, u64 start, u64 end,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000519 struct rb_node ***p,
520 struct rb_node **parent,
Qu Wenruod38ed272015-10-12 14:53:37 +0800521 unsigned *bits, struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500522{
523 struct rb_node *node;
524
David Sterba27922372019-06-18 20:00:05 +0200525 if (end < start) {
526 btrfs_err(tree->fs_info,
527 "insert state: end < start %llu %llu", end, start);
528 WARN_ON(1);
529 }
Chris Masond1310b22008-01-24 16:13:08 -0500530 state->start = start;
531 state->end = end;
Josef Bacik9ed74f22009-09-11 16:12:44 -0400532
Qu Wenruod38ed272015-10-12 14:53:37 +0800533 set_state_bits(tree, state, bits, changeset);
Xiao Guangrong3150b692011-07-14 03:19:08 +0000534
Filipe Mananaf2071b22014-02-12 15:05:53 +0000535 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
Chris Masond1310b22008-01-24 16:13:08 -0500536 if (node) {
537 struct extent_state *found;
538 found = rb_entry(node, struct extent_state, rb_node);
David Sterba27922372019-06-18 20:00:05 +0200539 btrfs_err(tree->fs_info,
540 "found node %llu %llu on insert of %llu %llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +0200541 found->start, found->end, start, end);
Chris Masond1310b22008-01-24 16:13:08 -0500542 return -EEXIST;
543 }
544 merge_state(tree, state);
545 return 0;
546}
547
548/*
549 * split a given extent state struct in two, inserting the preallocated
550 * struct 'prealloc' as the newly created second half. 'split' indicates an
551 * offset inside 'orig' where it should be split.
552 *
553 * Before calling,
554 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
555 * are two extent state structs in the tree:
556 * prealloc: [orig->start, split - 1]
557 * orig: [ split, orig->end ]
558 *
559 * The tree locks are not taken by this function. They need to be held
560 * by the caller.
561 */
562static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
563 struct extent_state *prealloc, u64 split)
564{
565 struct rb_node *node;
Josef Bacik9ed74f22009-09-11 16:12:44 -0400566
Nikolay Borisovabbb55f2018-11-01 14:09:53 +0200567 if (tree->private_data && is_data_inode(tree->private_data))
568 btrfs_split_delalloc_extent(tree->private_data, orig, split);
Josef Bacik9ed74f22009-09-11 16:12:44 -0400569
Chris Masond1310b22008-01-24 16:13:08 -0500570 prealloc->start = orig->start;
571 prealloc->end = split - 1;
572 prealloc->state = orig->state;
573 orig->start = split;
574
Filipe Mananaf2071b22014-02-12 15:05:53 +0000575 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
576 &prealloc->rb_node, NULL, NULL);
Chris Masond1310b22008-01-24 16:13:08 -0500577 if (node) {
Chris Masond1310b22008-01-24 16:13:08 -0500578 free_extent_state(prealloc);
579 return -EEXIST;
580 }
581 return 0;
582}
583
Li Zefancdc6a392012-03-12 16:39:48 +0800584static struct extent_state *next_state(struct extent_state *state)
585{
586 struct rb_node *next = rb_next(&state->rb_node);
587 if (next)
588 return rb_entry(next, struct extent_state, rb_node);
589 else
590 return NULL;
591}
592
Chris Masond1310b22008-01-24 16:13:08 -0500593/*
594 * utility function to clear some bits in an extent state struct.
Andrea Gelmini52042d82018-11-28 12:05:13 +0100595 * it will optionally wake up anyone waiting on this state (wake == 1).
Chris Masond1310b22008-01-24 16:13:08 -0500596 *
597 * If no bits are set on the state struct after clearing things, the
598 * struct is freed and removed from the tree
599 */
Li Zefancdc6a392012-03-12 16:39:48 +0800600static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
601 struct extent_state *state,
Qu Wenruofefdc552015-10-12 15:35:38 +0800602 unsigned *bits, int wake,
603 struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500604{
Li Zefancdc6a392012-03-12 16:39:48 +0800605 struct extent_state *next;
David Sterba9ee49a042015-01-14 19:52:13 +0100606 unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
David Sterba57599c72018-03-01 17:56:34 +0100607 int ret;
Chris Masond1310b22008-01-24 16:13:08 -0500608
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400609 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
Chris Masond1310b22008-01-24 16:13:08 -0500610 u64 range = state->end - state->start + 1;
611 WARN_ON(range > tree->dirty_bytes);
612 tree->dirty_bytes -= range;
613 }
Nikolay Borisova36bb5f2018-11-01 14:09:51 +0200614
615 if (tree->private_data && is_data_inode(tree->private_data))
616 btrfs_clear_delalloc_extent(tree->private_data, state, bits);
617
David Sterba57599c72018-03-01 17:56:34 +0100618 ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
619 BUG_ON(ret < 0);
Josef Bacik32c00af2009-10-08 13:34:05 -0400620 state->state &= ~bits_to_clear;
Chris Masond1310b22008-01-24 16:13:08 -0500621 if (wake)
622 wake_up(&state->wq);
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400623 if (state->state == 0) {
Li Zefancdc6a392012-03-12 16:39:48 +0800624 next = next_state(state);
Filipe Manana27a35072014-07-06 20:09:59 +0100625 if (extent_state_in_tree(state)) {
Chris Masond1310b22008-01-24 16:13:08 -0500626 rb_erase(&state->rb_node, &tree->state);
Filipe Manana27a35072014-07-06 20:09:59 +0100627 RB_CLEAR_NODE(&state->rb_node);
Chris Masond1310b22008-01-24 16:13:08 -0500628 free_extent_state(state);
629 } else {
630 WARN_ON(1);
631 }
632 } else {
633 merge_state(tree, state);
Li Zefancdc6a392012-03-12 16:39:48 +0800634 next = next_state(state);
Chris Masond1310b22008-01-24 16:13:08 -0500635 }
Li Zefancdc6a392012-03-12 16:39:48 +0800636 return next;
Chris Masond1310b22008-01-24 16:13:08 -0500637}
638
Xiao Guangrong82337672011-04-20 06:44:57 +0000639static struct extent_state *
640alloc_extent_state_atomic(struct extent_state *prealloc)
641{
642 if (!prealloc)
643 prealloc = alloc_extent_state(GFP_ATOMIC);
644
645 return prealloc;
646}
647
Eric Sandeen48a3b632013-04-25 20:41:01 +0000648static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400649{
David Sterba05912a32018-07-18 19:23:45 +0200650 struct inode *inode = tree->private_data;
651
652 btrfs_panic(btrfs_sb(inode->i_sb), err,
653 "locking error: extent tree was modified by another thread while locked");
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400654}
655
Chris Masond1310b22008-01-24 16:13:08 -0500656/*
657 * clear some bits on a range in the tree. This may require splitting
658 * or inserting elements in the tree, so the gfp mask is used to
659 * indicate which allocations or sleeping are allowed.
660 *
661 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
662 * the given range from the tree regardless of state (ie for truncate).
663 *
664 * the range [start, end] is inclusive.
665 *
Jeff Mahoney6763af82012-03-01 14:56:29 +0100666 * This takes the tree lock, and returns 0 on success and < 0 on error.
Chris Masond1310b22008-01-24 16:13:08 -0500667 */
David Sterba66b0c882017-10-31 16:30:47 +0100668int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruofefdc552015-10-12 15:35:38 +0800669 unsigned bits, int wake, int delete,
670 struct extent_state **cached_state,
671 gfp_t mask, struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500672{
673 struct extent_state *state;
Chris Mason2c64c532009-09-02 15:04:12 -0400674 struct extent_state *cached;
Chris Masond1310b22008-01-24 16:13:08 -0500675 struct extent_state *prealloc = NULL;
676 struct rb_node *node;
Yan Zheng5c939df2009-05-27 09:16:03 -0400677 u64 last_end;
Chris Masond1310b22008-01-24 16:13:08 -0500678 int err;
Josef Bacik2ac55d42010-02-03 19:33:23 +0000679 int clear = 0;
Chris Masond1310b22008-01-24 16:13:08 -0500680
Josef Bacika5dee372013-12-13 10:02:44 -0500681 btrfs_debug_check_extent_io_range(tree, start, end);
Qu Wenruoa1d19842019-03-01 10:48:00 +0800682 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
David Sterba8d599ae2013-04-30 15:22:23 +0000683
Josef Bacik7ee9e442013-06-21 16:37:03 -0400684 if (bits & EXTENT_DELALLOC)
685 bits |= EXTENT_NORESERVE;
686
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400687 if (delete)
688 bits |= ~EXTENT_CTLBITS;
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400689
Nikolay Borisov88826792019-03-14 15:28:31 +0200690 if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
Josef Bacik2ac55d42010-02-03 19:33:23 +0000691 clear = 1;
Chris Masond1310b22008-01-24 16:13:08 -0500692again:
Mel Gormand0164ad2015-11-06 16:28:21 -0800693 if (!prealloc && gfpflags_allow_blocking(mask)) {
Filipe Mananac7bc6312014-11-03 14:12:57 +0000694 /*
695 * Don't care for allocation failure here because we might end
696 * up not needing the pre-allocated extent state at all, which
697 * is the case if we only have in the tree extent states that
698 * cover our input range and don't cover too any other range.
699 * If we end up needing a new extent state we allocate it later.
700 */
Chris Masond1310b22008-01-24 16:13:08 -0500701 prealloc = alloc_extent_state(mask);
Chris Masond1310b22008-01-24 16:13:08 -0500702 }
703
Chris Masoncad321a2008-12-17 14:51:42 -0500704 spin_lock(&tree->lock);
Chris Mason2c64c532009-09-02 15:04:12 -0400705 if (cached_state) {
706 cached = *cached_state;
Josef Bacik2ac55d42010-02-03 19:33:23 +0000707
708 if (clear) {
709 *cached_state = NULL;
710 cached_state = NULL;
711 }
712
Filipe Manana27a35072014-07-06 20:09:59 +0100713 if (cached && extent_state_in_tree(cached) &&
714 cached->start <= start && cached->end > start) {
Josef Bacik2ac55d42010-02-03 19:33:23 +0000715 if (clear)
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200716 refcount_dec(&cached->refs);
Chris Mason2c64c532009-09-02 15:04:12 -0400717 state = cached;
Chris Mason42daec22009-09-23 19:51:09 -0400718 goto hit_next;
Chris Mason2c64c532009-09-02 15:04:12 -0400719 }
Josef Bacik2ac55d42010-02-03 19:33:23 +0000720 if (clear)
721 free_extent_state(cached);
Chris Mason2c64c532009-09-02 15:04:12 -0400722 }
Chris Masond1310b22008-01-24 16:13:08 -0500723 /*
724 * this search will find the extents that end after
725 * our range starts
726 */
Chris Mason80ea96b2008-02-01 14:51:59 -0500727 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -0500728 if (!node)
729 goto out;
730 state = rb_entry(node, struct extent_state, rb_node);
Chris Mason2c64c532009-09-02 15:04:12 -0400731hit_next:
Chris Masond1310b22008-01-24 16:13:08 -0500732 if (state->start > end)
733 goto out;
734 WARN_ON(state->end < start);
Yan Zheng5c939df2009-05-27 09:16:03 -0400735 last_end = state->end;
Chris Masond1310b22008-01-24 16:13:08 -0500736
Liu Bo04493142012-02-16 18:34:37 +0800737 /* the state doesn't have the wanted bits, go ahead */
Li Zefancdc6a392012-03-12 16:39:48 +0800738 if (!(state->state & bits)) {
739 state = next_state(state);
Liu Bo04493142012-02-16 18:34:37 +0800740 goto next;
Li Zefancdc6a392012-03-12 16:39:48 +0800741 }
Liu Bo04493142012-02-16 18:34:37 +0800742
Chris Masond1310b22008-01-24 16:13:08 -0500743 /*
744 * | ---- desired range ---- |
745 * | state | or
746 * | ------------- state -------------- |
747 *
748 * We need to split the extent we found, and may flip
749 * bits on second half.
750 *
751 * If the extent we found extends past our range, we
752 * just split and search again. It'll get split again
753 * the next time though.
754 *
755 * If the extent we found is inside our range, we clear
756 * the desired bit on it.
757 */
758
759 if (state->start < start) {
Xiao Guangrong82337672011-04-20 06:44:57 +0000760 prealloc = alloc_extent_state_atomic(prealloc);
761 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -0500762 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400763 if (err)
764 extent_io_tree_panic(tree, err);
765
Chris Masond1310b22008-01-24 16:13:08 -0500766 prealloc = NULL;
767 if (err)
768 goto out;
769 if (state->end <= end) {
Qu Wenruofefdc552015-10-12 15:35:38 +0800770 state = clear_state_bit(tree, state, &bits, wake,
771 changeset);
Liu Bod1ac6e42012-05-10 18:10:39 +0800772 goto next;
Chris Masond1310b22008-01-24 16:13:08 -0500773 }
774 goto search_again;
775 }
776 /*
777 * | ---- desired range ---- |
778 * | state |
779 * We need to split the extent, and clear the bit
780 * on the first half
781 */
782 if (state->start <= end && state->end > end) {
Xiao Guangrong82337672011-04-20 06:44:57 +0000783 prealloc = alloc_extent_state_atomic(prealloc);
784 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -0500785 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400786 if (err)
787 extent_io_tree_panic(tree, err);
788
Chris Masond1310b22008-01-24 16:13:08 -0500789 if (wake)
790 wake_up(&state->wq);
Chris Mason42daec22009-09-23 19:51:09 -0400791
Qu Wenruofefdc552015-10-12 15:35:38 +0800792 clear_state_bit(tree, prealloc, &bits, wake, changeset);
Josef Bacik9ed74f22009-09-11 16:12:44 -0400793
Chris Masond1310b22008-01-24 16:13:08 -0500794 prealloc = NULL;
795 goto out;
796 }
Chris Mason42daec22009-09-23 19:51:09 -0400797
Qu Wenruofefdc552015-10-12 15:35:38 +0800798 state = clear_state_bit(tree, state, &bits, wake, changeset);
Liu Bo04493142012-02-16 18:34:37 +0800799next:
Yan Zheng5c939df2009-05-27 09:16:03 -0400800 if (last_end == (u64)-1)
801 goto out;
802 start = last_end + 1;
Li Zefancdc6a392012-03-12 16:39:48 +0800803 if (start <= end && state && !need_resched())
Liu Bo692e5752012-02-16 18:34:36 +0800804 goto hit_next;
Chris Masond1310b22008-01-24 16:13:08 -0500805
806search_again:
807 if (start > end)
808 goto out;
Chris Masoncad321a2008-12-17 14:51:42 -0500809 spin_unlock(&tree->lock);
Mel Gormand0164ad2015-11-06 16:28:21 -0800810 if (gfpflags_allow_blocking(mask))
Chris Masond1310b22008-01-24 16:13:08 -0500811 cond_resched();
812 goto again;
David Sterba7ab5cb22016-04-27 01:02:15 +0200813
814out:
815 spin_unlock(&tree->lock);
816 if (prealloc)
817 free_extent_state(prealloc);
818
819 return 0;
820
Chris Masond1310b22008-01-24 16:13:08 -0500821}
Chris Masond1310b22008-01-24 16:13:08 -0500822
Jeff Mahoney143bede2012-03-01 14:56:26 +0100823static void wait_on_state(struct extent_io_tree *tree,
824 struct extent_state *state)
Christoph Hellwig641f5212008-12-02 06:36:10 -0500825 __releases(tree->lock)
826 __acquires(tree->lock)
Chris Masond1310b22008-01-24 16:13:08 -0500827{
828 DEFINE_WAIT(wait);
829 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
Chris Masoncad321a2008-12-17 14:51:42 -0500830 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500831 schedule();
Chris Masoncad321a2008-12-17 14:51:42 -0500832 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500833 finish_wait(&state->wq, &wait);
Chris Masond1310b22008-01-24 16:13:08 -0500834}
835
836/*
837 * waits for one or more bits to clear on a range in the state tree.
838 * The range [start, end] is inclusive.
839 * The tree lock is taken by this function
840 */
David Sterba41074882013-04-29 13:38:46 +0000841static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
842 unsigned long bits)
Chris Masond1310b22008-01-24 16:13:08 -0500843{
844 struct extent_state *state;
845 struct rb_node *node;
846
Josef Bacika5dee372013-12-13 10:02:44 -0500847 btrfs_debug_check_extent_io_range(tree, start, end);
David Sterba8d599ae2013-04-30 15:22:23 +0000848
Chris Masoncad321a2008-12-17 14:51:42 -0500849 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500850again:
851 while (1) {
852 /*
853 * this search will find all the extents that end after
854 * our range starts
855 */
Chris Mason80ea96b2008-02-01 14:51:59 -0500856 node = tree_search(tree, start);
Filipe Mananac50d3e72014-03-31 14:53:25 +0100857process_node:
Chris Masond1310b22008-01-24 16:13:08 -0500858 if (!node)
859 break;
860
861 state = rb_entry(node, struct extent_state, rb_node);
862
863 if (state->start > end)
864 goto out;
865
866 if (state->state & bits) {
867 start = state->start;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200868 refcount_inc(&state->refs);
Chris Masond1310b22008-01-24 16:13:08 -0500869 wait_on_state(tree, state);
870 free_extent_state(state);
871 goto again;
872 }
873 start = state->end + 1;
874
875 if (start > end)
876 break;
877
Filipe Mananac50d3e72014-03-31 14:53:25 +0100878 if (!cond_resched_lock(&tree->lock)) {
879 node = rb_next(node);
880 goto process_node;
881 }
Chris Masond1310b22008-01-24 16:13:08 -0500882 }
883out:
Chris Masoncad321a2008-12-17 14:51:42 -0500884 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500885}
Chris Masond1310b22008-01-24 16:13:08 -0500886
Jeff Mahoney1bf85042011-07-21 16:56:09 +0000887static void set_state_bits(struct extent_io_tree *tree,
Chris Masond1310b22008-01-24 16:13:08 -0500888 struct extent_state *state,
Qu Wenruod38ed272015-10-12 14:53:37 +0800889 unsigned *bits, struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500890{
David Sterba9ee49a042015-01-14 19:52:13 +0100891 unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
David Sterba57599c72018-03-01 17:56:34 +0100892 int ret;
Josef Bacik9ed74f22009-09-11 16:12:44 -0400893
Nikolay Borisove06a1fc2018-11-01 14:09:50 +0200894 if (tree->private_data && is_data_inode(tree->private_data))
895 btrfs_set_delalloc_extent(tree->private_data, state, bits);
896
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400897 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
Chris Masond1310b22008-01-24 16:13:08 -0500898 u64 range = state->end - state->start + 1;
899 tree->dirty_bytes += range;
900 }
David Sterba57599c72018-03-01 17:56:34 +0100901 ret = add_extent_changeset(state, bits_to_set, changeset, 1);
902 BUG_ON(ret < 0);
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400903 state->state |= bits_to_set;
Chris Masond1310b22008-01-24 16:13:08 -0500904}
905
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100906static void cache_state_if_flags(struct extent_state *state,
907 struct extent_state **cached_ptr,
David Sterba9ee49a042015-01-14 19:52:13 +0100908 unsigned flags)
Chris Mason2c64c532009-09-02 15:04:12 -0400909{
910 if (cached_ptr && !(*cached_ptr)) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100911 if (!flags || (state->state & flags)) {
Chris Mason2c64c532009-09-02 15:04:12 -0400912 *cached_ptr = state;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200913 refcount_inc(&state->refs);
Chris Mason2c64c532009-09-02 15:04:12 -0400914 }
915 }
916}
917
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100918static void cache_state(struct extent_state *state,
919 struct extent_state **cached_ptr)
920{
921 return cache_state_if_flags(state, cached_ptr,
Nikolay Borisov88826792019-03-14 15:28:31 +0200922 EXTENT_LOCKED | EXTENT_BOUNDARY);
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100923}
924
Chris Masond1310b22008-01-24 16:13:08 -0500925/*
Chris Mason1edbb732009-09-02 13:24:36 -0400926 * set some bits on a range in the tree. This may require allocations or
927 * sleeping, so the gfp mask is used to indicate what is allowed.
Chris Masond1310b22008-01-24 16:13:08 -0500928 *
Chris Mason1edbb732009-09-02 13:24:36 -0400929 * If any of the exclusive bits are set, this will fail with -EEXIST if some
930 * part of the range already has the desired bits set. The start of the
931 * existing range is returned in failed_start in this case.
Chris Masond1310b22008-01-24 16:13:08 -0500932 *
Chris Mason1edbb732009-09-02 13:24:36 -0400933 * [start, end] is inclusive This takes the tree lock.
Chris Masond1310b22008-01-24 16:13:08 -0500934 */
Chris Mason1edbb732009-09-02 13:24:36 -0400935
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +0100936static int __must_check
937__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba9ee49a042015-01-14 19:52:13 +0100938 unsigned bits, unsigned exclusive_bits,
David Sterba41074882013-04-29 13:38:46 +0000939 u64 *failed_start, struct extent_state **cached_state,
Qu Wenruod38ed272015-10-12 14:53:37 +0800940 gfp_t mask, struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500941{
942 struct extent_state *state;
943 struct extent_state *prealloc = NULL;
944 struct rb_node *node;
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000945 struct rb_node **p;
946 struct rb_node *parent;
Chris Masond1310b22008-01-24 16:13:08 -0500947 int err = 0;
Chris Masond1310b22008-01-24 16:13:08 -0500948 u64 last_start;
949 u64 last_end;
Chris Mason42daec22009-09-23 19:51:09 -0400950
Josef Bacika5dee372013-12-13 10:02:44 -0500951 btrfs_debug_check_extent_io_range(tree, start, end);
Qu Wenruoa1d19842019-03-01 10:48:00 +0800952 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
David Sterba8d599ae2013-04-30 15:22:23 +0000953
Chris Masond1310b22008-01-24 16:13:08 -0500954again:
Mel Gormand0164ad2015-11-06 16:28:21 -0800955 if (!prealloc && gfpflags_allow_blocking(mask)) {
David Sterba059f7912016-04-27 01:03:45 +0200956 /*
957 * Don't care for allocation failure here because we might end
958 * up not needing the pre-allocated extent state at all, which
959 * is the case if we only have in the tree extent states that
960 * cover our input range and don't cover too any other range.
961 * If we end up needing a new extent state we allocate it later.
962 */
Chris Masond1310b22008-01-24 16:13:08 -0500963 prealloc = alloc_extent_state(mask);
Chris Masond1310b22008-01-24 16:13:08 -0500964 }
965
Chris Masoncad321a2008-12-17 14:51:42 -0500966 spin_lock(&tree->lock);
Chris Mason9655d292009-09-02 15:22:30 -0400967 if (cached_state && *cached_state) {
968 state = *cached_state;
Josef Bacikdf98b6e2011-06-20 14:53:48 -0400969 if (state->start <= start && state->end > start &&
Filipe Manana27a35072014-07-06 20:09:59 +0100970 extent_state_in_tree(state)) {
Chris Mason9655d292009-09-02 15:22:30 -0400971 node = &state->rb_node;
972 goto hit_next;
973 }
974 }
Chris Masond1310b22008-01-24 16:13:08 -0500975 /*
976 * this search will find all the extents that end after
977 * our range starts.
978 */
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000979 node = tree_search_for_insert(tree, start, &p, &parent);
Chris Masond1310b22008-01-24 16:13:08 -0500980 if (!node) {
Xiao Guangrong82337672011-04-20 06:44:57 +0000981 prealloc = alloc_extent_state_atomic(prealloc);
982 BUG_ON(!prealloc);
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000983 err = insert_state(tree, prealloc, start, end,
Qu Wenruod38ed272015-10-12 14:53:37 +0800984 &p, &parent, &bits, changeset);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400985 if (err)
986 extent_io_tree_panic(tree, err);
987
Filipe David Borba Mananac42ac0b2013-11-26 15:01:34 +0000988 cache_state(prealloc, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -0500989 prealloc = NULL;
Chris Masond1310b22008-01-24 16:13:08 -0500990 goto out;
991 }
Chris Masond1310b22008-01-24 16:13:08 -0500992 state = rb_entry(node, struct extent_state, rb_node);
Chris Mason40431d62009-08-05 12:57:59 -0400993hit_next:
Chris Masond1310b22008-01-24 16:13:08 -0500994 last_start = state->start;
995 last_end = state->end;
996
997 /*
998 * | ---- desired range ---- |
999 * | state |
1000 *
1001 * Just lock what we found and keep going
1002 */
1003 if (state->start == start && state->end <= end) {
Chris Mason1edbb732009-09-02 13:24:36 -04001004 if (state->state & exclusive_bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001005 *failed_start = state->start;
1006 err = -EEXIST;
1007 goto out;
1008 }
Chris Mason42daec22009-09-23 19:51:09 -04001009
Qu Wenruod38ed272015-10-12 14:53:37 +08001010 set_state_bits(tree, state, &bits, changeset);
Chris Mason2c64c532009-09-02 15:04:12 -04001011 cache_state(state, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001012 merge_state(tree, state);
Yan Zheng5c939df2009-05-27 09:16:03 -04001013 if (last_end == (u64)-1)
1014 goto out;
1015 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001016 state = next_state(state);
1017 if (start < end && state && state->start == start &&
1018 !need_resched())
1019 goto hit_next;
Chris Masond1310b22008-01-24 16:13:08 -05001020 goto search_again;
1021 }
1022
1023 /*
1024 * | ---- desired range ---- |
1025 * | state |
1026 * or
1027 * | ------------- state -------------- |
1028 *
1029 * We need to split the extent we found, and may flip bits on
1030 * second half.
1031 *
1032 * If the extent we found extends past our
1033 * range, we just split and search again. It'll get split
1034 * again the next time though.
1035 *
1036 * If the extent we found is inside our range, we set the
1037 * desired bit on it.
1038 */
1039 if (state->start < start) {
Chris Mason1edbb732009-09-02 13:24:36 -04001040 if (state->state & exclusive_bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001041 *failed_start = start;
1042 err = -EEXIST;
1043 goto out;
1044 }
Xiao Guangrong82337672011-04-20 06:44:57 +00001045
1046 prealloc = alloc_extent_state_atomic(prealloc);
1047 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -05001048 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001049 if (err)
1050 extent_io_tree_panic(tree, err);
1051
Chris Masond1310b22008-01-24 16:13:08 -05001052 prealloc = NULL;
1053 if (err)
1054 goto out;
1055 if (state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001056 set_state_bits(tree, state, &bits, changeset);
Chris Mason2c64c532009-09-02 15:04:12 -04001057 cache_state(state, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001058 merge_state(tree, state);
Yan Zheng5c939df2009-05-27 09:16:03 -04001059 if (last_end == (u64)-1)
1060 goto out;
1061 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001062 state = next_state(state);
1063 if (start < end && state && state->start == start &&
1064 !need_resched())
1065 goto hit_next;
Chris Masond1310b22008-01-24 16:13:08 -05001066 }
1067 goto search_again;
1068 }
1069 /*
1070 * | ---- desired range ---- |
1071 * | state | or | state |
1072 *
1073 * There's a hole, we need to insert something in it and
1074 * ignore the extent we found.
1075 */
1076 if (state->start > start) {
1077 u64 this_end;
1078 if (end < last_start)
1079 this_end = end;
1080 else
Chris Masond3977122009-01-05 21:25:51 -05001081 this_end = last_start - 1;
Xiao Guangrong82337672011-04-20 06:44:57 +00001082
1083 prealloc = alloc_extent_state_atomic(prealloc);
1084 BUG_ON(!prealloc);
Xiao Guangrongc7f895a2011-04-20 06:45:49 +00001085
1086 /*
1087 * Avoid to free 'prealloc' if it can be merged with
1088 * the later extent.
1089 */
Chris Masond1310b22008-01-24 16:13:08 -05001090 err = insert_state(tree, prealloc, start, this_end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001091 NULL, NULL, &bits, changeset);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001092 if (err)
1093 extent_io_tree_panic(tree, err);
1094
Chris Mason2c64c532009-09-02 15:04:12 -04001095 cache_state(prealloc, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001096 prealloc = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05001097 start = this_end + 1;
1098 goto search_again;
1099 }
1100 /*
1101 * | ---- desired range ---- |
1102 * | state |
1103 * We need to split the extent, and set the bit
1104 * on the first half
1105 */
1106 if (state->start <= end && state->end > end) {
Chris Mason1edbb732009-09-02 13:24:36 -04001107 if (state->state & exclusive_bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001108 *failed_start = start;
1109 err = -EEXIST;
1110 goto out;
1111 }
Xiao Guangrong82337672011-04-20 06:44:57 +00001112
1113 prealloc = alloc_extent_state_atomic(prealloc);
1114 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -05001115 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001116 if (err)
1117 extent_io_tree_panic(tree, err);
Chris Masond1310b22008-01-24 16:13:08 -05001118
Qu Wenruod38ed272015-10-12 14:53:37 +08001119 set_state_bits(tree, prealloc, &bits, changeset);
Chris Mason2c64c532009-09-02 15:04:12 -04001120 cache_state(prealloc, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001121 merge_state(tree, prealloc);
1122 prealloc = NULL;
1123 goto out;
1124 }
1125
David Sterbab5a4ba142016-04-27 01:02:15 +02001126search_again:
1127 if (start > end)
1128 goto out;
1129 spin_unlock(&tree->lock);
1130 if (gfpflags_allow_blocking(mask))
1131 cond_resched();
1132 goto again;
Chris Masond1310b22008-01-24 16:13:08 -05001133
1134out:
Chris Masoncad321a2008-12-17 14:51:42 -05001135 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001136 if (prealloc)
1137 free_extent_state(prealloc);
1138
1139 return err;
1140
Chris Masond1310b22008-01-24 16:13:08 -05001141}
Chris Masond1310b22008-01-24 16:13:08 -05001142
David Sterba41074882013-04-29 13:38:46 +00001143int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba9ee49a042015-01-14 19:52:13 +01001144 unsigned bits, u64 * failed_start,
David Sterba41074882013-04-29 13:38:46 +00001145 struct extent_state **cached_state, gfp_t mask)
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001146{
1147 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
Qu Wenruod38ed272015-10-12 14:53:37 +08001148 cached_state, mask, NULL);
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001149}
1150
1151
Josef Bacik462d6fa2011-09-26 13:56:12 -04001152/**
Liu Bo10983f22012-07-11 15:26:19 +08001153 * convert_extent_bit - convert all bits in a given range from one bit to
1154 * another
Josef Bacik462d6fa2011-09-26 13:56:12 -04001155 * @tree: the io tree to search
1156 * @start: the start offset in bytes
1157 * @end: the end offset in bytes (inclusive)
1158 * @bits: the bits to set in this range
1159 * @clear_bits: the bits to clear in this range
Josef Bacike6138872012-09-27 17:07:30 -04001160 * @cached_state: state that we're going to cache
Josef Bacik462d6fa2011-09-26 13:56:12 -04001161 *
1162 * This will go through and set bits for the given range. If any states exist
1163 * already in this range they are set with the given bit and cleared of the
1164 * clear_bits. This is only meant to be used by things that are mergeable, ie
1165 * converting from say DELALLOC to DIRTY. This is not meant to be used with
1166 * boundary bits like LOCK.
David Sterba210aa272016-04-26 23:54:39 +02001167 *
1168 * All allocations are done with GFP_NOFS.
Josef Bacik462d6fa2011-09-26 13:56:12 -04001169 */
1170int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba9ee49a042015-01-14 19:52:13 +01001171 unsigned bits, unsigned clear_bits,
David Sterba210aa272016-04-26 23:54:39 +02001172 struct extent_state **cached_state)
Josef Bacik462d6fa2011-09-26 13:56:12 -04001173{
1174 struct extent_state *state;
1175 struct extent_state *prealloc = NULL;
1176 struct rb_node *node;
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001177 struct rb_node **p;
1178 struct rb_node *parent;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001179 int err = 0;
1180 u64 last_start;
1181 u64 last_end;
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001182 bool first_iteration = true;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001183
Josef Bacika5dee372013-12-13 10:02:44 -05001184 btrfs_debug_check_extent_io_range(tree, start, end);
Qu Wenruoa1d19842019-03-01 10:48:00 +08001185 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1186 clear_bits);
David Sterba8d599ae2013-04-30 15:22:23 +00001187
Josef Bacik462d6fa2011-09-26 13:56:12 -04001188again:
David Sterba210aa272016-04-26 23:54:39 +02001189 if (!prealloc) {
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001190 /*
1191 * Best effort, don't worry if extent state allocation fails
1192 * here for the first iteration. We might have a cached state
1193 * that matches exactly the target range, in which case no
1194 * extent state allocations are needed. We'll only know this
1195 * after locking the tree.
1196 */
David Sterba210aa272016-04-26 23:54:39 +02001197 prealloc = alloc_extent_state(GFP_NOFS);
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001198 if (!prealloc && !first_iteration)
Josef Bacik462d6fa2011-09-26 13:56:12 -04001199 return -ENOMEM;
1200 }
1201
1202 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001203 if (cached_state && *cached_state) {
1204 state = *cached_state;
1205 if (state->start <= start && state->end > start &&
Filipe Manana27a35072014-07-06 20:09:59 +01001206 extent_state_in_tree(state)) {
Josef Bacike6138872012-09-27 17:07:30 -04001207 node = &state->rb_node;
1208 goto hit_next;
1209 }
1210 }
1211
Josef Bacik462d6fa2011-09-26 13:56:12 -04001212 /*
1213 * this search will find all the extents that end after
1214 * our range starts.
1215 */
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001216 node = tree_search_for_insert(tree, start, &p, &parent);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001217 if (!node) {
1218 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001219 if (!prealloc) {
1220 err = -ENOMEM;
1221 goto out;
1222 }
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001223 err = insert_state(tree, prealloc, start, end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001224 &p, &parent, &bits, NULL);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001225 if (err)
1226 extent_io_tree_panic(tree, err);
Filipe David Borba Mananac42ac0b2013-11-26 15:01:34 +00001227 cache_state(prealloc, cached_state);
1228 prealloc = NULL;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001229 goto out;
1230 }
1231 state = rb_entry(node, struct extent_state, rb_node);
1232hit_next:
1233 last_start = state->start;
1234 last_end = state->end;
1235
1236 /*
1237 * | ---- desired range ---- |
1238 * | state |
1239 *
1240 * Just lock what we found and keep going
1241 */
1242 if (state->start == start && state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001243 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001244 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001245 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001246 if (last_end == (u64)-1)
1247 goto out;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001248 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001249 if (start < end && state && state->start == start &&
1250 !need_resched())
1251 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001252 goto search_again;
1253 }
1254
1255 /*
1256 * | ---- desired range ---- |
1257 * | state |
1258 * or
1259 * | ------------- state -------------- |
1260 *
1261 * We need to split the extent we found, and may flip bits on
1262 * second half.
1263 *
1264 * If the extent we found extends past our
1265 * range, we just split and search again. It'll get split
1266 * again the next time though.
1267 *
1268 * If the extent we found is inside our range, we set the
1269 * desired bit on it.
1270 */
1271 if (state->start < start) {
1272 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001273 if (!prealloc) {
1274 err = -ENOMEM;
1275 goto out;
1276 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001277 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001278 if (err)
1279 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001280 prealloc = NULL;
1281 if (err)
1282 goto out;
1283 if (state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001284 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001285 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001286 state = clear_state_bit(tree, state, &clear_bits, 0,
1287 NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001288 if (last_end == (u64)-1)
1289 goto out;
1290 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001291 if (start < end && state && state->start == start &&
1292 !need_resched())
1293 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001294 }
1295 goto search_again;
1296 }
1297 /*
1298 * | ---- desired range ---- |
1299 * | state | or | state |
1300 *
1301 * There's a hole, we need to insert something in it and
1302 * ignore the extent we found.
1303 */
1304 if (state->start > start) {
1305 u64 this_end;
1306 if (end < last_start)
1307 this_end = end;
1308 else
1309 this_end = last_start - 1;
1310
1311 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001312 if (!prealloc) {
1313 err = -ENOMEM;
1314 goto out;
1315 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001316
1317 /*
1318 * Avoid to free 'prealloc' if it can be merged with
1319 * the later extent.
1320 */
1321 err = insert_state(tree, prealloc, start, this_end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001322 NULL, NULL, &bits, NULL);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001323 if (err)
1324 extent_io_tree_panic(tree, err);
Josef Bacike6138872012-09-27 17:07:30 -04001325 cache_state(prealloc, cached_state);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001326 prealloc = NULL;
1327 start = this_end + 1;
1328 goto search_again;
1329 }
1330 /*
1331 * | ---- desired range ---- |
1332 * | state |
1333 * We need to split the extent, and set the bit
1334 * on the first half
1335 */
1336 if (state->start <= end && state->end > end) {
1337 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001338 if (!prealloc) {
1339 err = -ENOMEM;
1340 goto out;
1341 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001342
1343 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001344 if (err)
1345 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001346
Qu Wenruod38ed272015-10-12 14:53:37 +08001347 set_state_bits(tree, prealloc, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001348 cache_state(prealloc, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001349 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001350 prealloc = NULL;
1351 goto out;
1352 }
1353
Josef Bacik462d6fa2011-09-26 13:56:12 -04001354search_again:
1355 if (start > end)
1356 goto out;
1357 spin_unlock(&tree->lock);
David Sterba210aa272016-04-26 23:54:39 +02001358 cond_resched();
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001359 first_iteration = false;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001360 goto again;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001361
1362out:
1363 spin_unlock(&tree->lock);
1364 if (prealloc)
1365 free_extent_state(prealloc);
1366
1367 return err;
1368}
1369
Chris Masond1310b22008-01-24 16:13:08 -05001370/* wrappers around set/clear extent bit */
Qu Wenruod38ed272015-10-12 14:53:37 +08001371int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba2c53b912016-04-26 23:54:39 +02001372 unsigned bits, struct extent_changeset *changeset)
Qu Wenruod38ed272015-10-12 14:53:37 +08001373{
1374 /*
1375 * We don't support EXTENT_LOCKED yet, as current changeset will
1376 * record any bits changed, so for EXTENT_LOCKED case, it will
1377 * either fail with -EEXIST or changeset will record the whole
1378 * range.
1379 */
1380 BUG_ON(bits & EXTENT_LOCKED);
1381
David Sterba2c53b912016-04-26 23:54:39 +02001382 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
Qu Wenruod38ed272015-10-12 14:53:37 +08001383 changeset);
1384}
1385
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001386int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1387 unsigned bits)
1388{
1389 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1390 GFP_NOWAIT, NULL);
1391}
1392
Qu Wenruofefdc552015-10-12 15:35:38 +08001393int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1394 unsigned bits, int wake, int delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001395 struct extent_state **cached)
Qu Wenruofefdc552015-10-12 15:35:38 +08001396{
1397 return __clear_extent_bit(tree, start, end, bits, wake, delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001398 cached, GFP_NOFS, NULL);
Qu Wenruofefdc552015-10-12 15:35:38 +08001399}
1400
Qu Wenruofefdc552015-10-12 15:35:38 +08001401int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaf734c442016-04-26 23:54:39 +02001402 unsigned bits, struct extent_changeset *changeset)
Qu Wenruofefdc552015-10-12 15:35:38 +08001403{
1404 /*
1405 * Don't support EXTENT_LOCKED case, same reason as
1406 * set_record_extent_bits().
1407 */
1408 BUG_ON(bits & EXTENT_LOCKED);
1409
David Sterbaf734c442016-04-26 23:54:39 +02001410 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
Qu Wenruofefdc552015-10-12 15:35:38 +08001411 changeset);
1412}
1413
Chris Masond352ac62008-09-29 15:18:18 -04001414/*
1415 * either insert or lock state struct between start and end use mask to tell
1416 * us if waiting is desired.
1417 */
Chris Mason1edbb732009-09-02 13:24:36 -04001418int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaff13db42015-12-03 14:30:40 +01001419 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001420{
1421 int err;
1422 u64 failed_start;
David Sterba9ee49a042015-01-14 19:52:13 +01001423
Chris Masond1310b22008-01-24 16:13:08 -05001424 while (1) {
David Sterbaff13db42015-12-03 14:30:40 +01001425 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001426 EXTENT_LOCKED, &failed_start,
Qu Wenruod38ed272015-10-12 14:53:37 +08001427 cached_state, GFP_NOFS, NULL);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001428 if (err == -EEXIST) {
Chris Masond1310b22008-01-24 16:13:08 -05001429 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1430 start = failed_start;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001431 } else
Chris Masond1310b22008-01-24 16:13:08 -05001432 break;
Chris Masond1310b22008-01-24 16:13:08 -05001433 WARN_ON(start > end);
1434 }
1435 return err;
1436}
Chris Masond1310b22008-01-24 16:13:08 -05001437
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001438int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001439{
1440 int err;
1441 u64 failed_start;
1442
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001443 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
Qu Wenruod38ed272015-10-12 14:53:37 +08001444 &failed_start, NULL, GFP_NOFS, NULL);
Yan Zheng66435582008-10-30 14:19:50 -04001445 if (err == -EEXIST) {
1446 if (failed_start > start)
1447 clear_extent_bit(tree, start, failed_start - 1,
David Sterbaae0f1622017-10-31 16:37:52 +01001448 EXTENT_LOCKED, 1, 0, NULL);
Josef Bacik25179202008-10-29 14:49:05 -04001449 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001450 }
Josef Bacik25179202008-10-29 14:49:05 -04001451 return 1;
1452}
Josef Bacik25179202008-10-29 14:49:05 -04001453
David Sterbabd1fa4f2015-12-03 13:08:59 +01001454void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001455{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001456 unsigned long index = start >> PAGE_SHIFT;
1457 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001458 struct page *page;
1459
1460 while (index <= end_index) {
1461 page = find_get_page(inode->i_mapping, index);
1462 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1463 clear_page_dirty_for_io(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001464 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001465 index++;
1466 }
Chris Mason4adaa612013-03-26 13:07:00 -04001467}
1468
David Sterbaf6311572015-12-03 13:08:59 +01001469void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001470{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001471 unsigned long index = start >> PAGE_SHIFT;
1472 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001473 struct page *page;
1474
1475 while (index <= end_index) {
1476 page = find_get_page(inode->i_mapping, index);
1477 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Mason4adaa612013-03-26 13:07:00 -04001478 __set_page_dirty_nobuffers(page);
Konstantin Khebnikov8d386332015-02-11 15:26:55 -08001479 account_page_redirty(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001480 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001481 index++;
1482 }
Chris Mason4adaa612013-03-26 13:07:00 -04001483}
1484
Chris Masond352ac62008-09-29 15:18:18 -04001485/* find the first state struct with 'bits' set after 'start', and
1486 * return it. tree->lock must be held. NULL will returned if
1487 * nothing was found after 'start'
1488 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00001489static struct extent_state *
1490find_first_extent_bit_state(struct extent_io_tree *tree,
David Sterba9ee49a042015-01-14 19:52:13 +01001491 u64 start, unsigned bits)
Chris Masond7fc6402008-02-18 12:12:38 -05001492{
1493 struct rb_node *node;
1494 struct extent_state *state;
1495
1496 /*
1497 * this search will find all the extents that end after
1498 * our range starts.
1499 */
1500 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001501 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001502 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001503
Chris Masond3977122009-01-05 21:25:51 -05001504 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001505 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001506 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001507 return state;
Chris Masond3977122009-01-05 21:25:51 -05001508
Chris Masond7fc6402008-02-18 12:12:38 -05001509 node = rb_next(node);
1510 if (!node)
1511 break;
1512 }
1513out:
1514 return NULL;
1515}
Chris Masond7fc6402008-02-18 12:12:38 -05001516
Chris Masond352ac62008-09-29 15:18:18 -04001517/*
Xiao Guangrong69261c42011-07-14 03:19:45 +00001518 * find the first offset in the io tree with 'bits' set. zero is
1519 * returned if we find something, and *start_ret and *end_ret are
1520 * set to reflect the state struct that was found.
1521 *
Wang Sheng-Hui477d7ea2012-04-06 14:35:47 +08001522 * If nothing was found, 1 is returned. If found something, return 0.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001523 */
1524int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
David Sterba9ee49a042015-01-14 19:52:13 +01001525 u64 *start_ret, u64 *end_ret, unsigned bits,
Josef Bacike6138872012-09-27 17:07:30 -04001526 struct extent_state **cached_state)
Xiao Guangrong69261c42011-07-14 03:19:45 +00001527{
1528 struct extent_state *state;
1529 int ret = 1;
1530
1531 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001532 if (cached_state && *cached_state) {
1533 state = *cached_state;
Filipe Manana27a35072014-07-06 20:09:59 +01001534 if (state->end == start - 1 && extent_state_in_tree(state)) {
Liu Bo9688e9a2018-08-23 03:14:53 +08001535 while ((state = next_state(state)) != NULL) {
Josef Bacike6138872012-09-27 17:07:30 -04001536 if (state->state & bits)
1537 goto got_it;
Josef Bacike6138872012-09-27 17:07:30 -04001538 }
1539 free_extent_state(*cached_state);
1540 *cached_state = NULL;
1541 goto out;
1542 }
1543 free_extent_state(*cached_state);
1544 *cached_state = NULL;
1545 }
1546
Xiao Guangrong69261c42011-07-14 03:19:45 +00001547 state = find_first_extent_bit_state(tree, start, bits);
Josef Bacike6138872012-09-27 17:07:30 -04001548got_it:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001549 if (state) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +01001550 cache_state_if_flags(state, cached_state, 0);
Xiao Guangrong69261c42011-07-14 03:19:45 +00001551 *start_ret = state->start;
1552 *end_ret = state->end;
1553 ret = 0;
1554 }
Josef Bacike6138872012-09-27 17:07:30 -04001555out:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001556 spin_unlock(&tree->lock);
1557 return ret;
1558}
1559
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001560/**
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001561 * find_first_clear_extent_bit - find the first range that has @bits not set.
1562 * This range could start before @start.
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001563 *
1564 * @tree - the tree to search
1565 * @start - the offset at/after which the found extent should start
1566 * @start_ret - records the beginning of the range
1567 * @end_ret - records the end of the range (inclusive)
1568 * @bits - the set of bits which must be unset
1569 *
1570 * Since unallocated range is also considered one which doesn't have the bits
1571 * set it's possible that @end_ret contains -1, this happens in case the range
1572 * spans (last_range_end, end of device]. In this case it's up to the caller to
1573 * trim @end_ret to the appropriate size.
1574 */
1575void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1576 u64 *start_ret, u64 *end_ret, unsigned bits)
1577{
1578 struct extent_state *state;
1579 struct rb_node *node, *prev = NULL, *next;
1580
1581 spin_lock(&tree->lock);
1582
1583 /* Find first extent with bits cleared */
1584 while (1) {
1585 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1586 if (!node) {
1587 node = next;
1588 if (!node) {
1589 /*
1590 * We are past the last allocated chunk,
1591 * set start at the end of the last extent. The
1592 * device alloc tree should never be empty so
1593 * prev is always set.
1594 */
1595 ASSERT(prev);
1596 state = rb_entry(prev, struct extent_state, rb_node);
1597 *start_ret = state->end + 1;
1598 *end_ret = -1;
1599 goto out;
1600 }
1601 }
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001602 /*
1603 * At this point 'node' either contains 'start' or start is
1604 * before 'node'
1605 */
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001606 state = rb_entry(node, struct extent_state, rb_node);
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001607
1608 if (in_range(start, state->start, state->end - state->start + 1)) {
1609 if (state->state & bits) {
1610 /*
1611 * |--range with bits sets--|
1612 * |
1613 * start
1614 */
1615 start = state->end + 1;
1616 } else {
1617 /*
1618 * 'start' falls within a range that doesn't
1619 * have the bits set, so take its start as
1620 * the beginning of the desired range
1621 *
1622 * |--range with bits cleared----|
1623 * |
1624 * start
1625 */
1626 *start_ret = state->start;
1627 break;
1628 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001629 } else {
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001630 /*
1631 * |---prev range---|---hole/unset---|---node range---|
1632 * |
1633 * start
1634 *
1635 * or
1636 *
1637 * |---hole/unset--||--first node--|
1638 * 0 |
1639 * start
1640 */
1641 if (prev) {
1642 state = rb_entry(prev, struct extent_state,
1643 rb_node);
1644 *start_ret = state->end + 1;
1645 } else {
1646 *start_ret = 0;
1647 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001648 break;
1649 }
1650 }
1651
1652 /*
1653 * Find the longest stretch from start until an entry which has the
1654 * bits set
1655 */
1656 while (1) {
1657 state = rb_entry(node, struct extent_state, rb_node);
1658 if (state->end >= start && !(state->state & bits)) {
1659 *end_ret = state->end;
1660 } else {
1661 *end_ret = state->start - 1;
1662 break;
1663 }
1664
1665 node = rb_next(node);
1666 if (!node)
1667 break;
1668 }
1669out:
1670 spin_unlock(&tree->lock);
1671}
1672
Xiao Guangrong69261c42011-07-14 03:19:45 +00001673/*
Chris Masond352ac62008-09-29 15:18:18 -04001674 * find a contiguous range of bytes in the file marked as delalloc, not
1675 * more than 'max_bytes'. start and end are used to return the range,
1676 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001677 * true is returned if we find something, false if nothing was in the tree
Chris Masond352ac62008-09-29 15:18:18 -04001678 */
Lu Fengqi3522e902018-11-29 11:33:38 +08001679static noinline bool find_delalloc_range(struct extent_io_tree *tree,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001680 u64 *start, u64 *end, u64 max_bytes,
1681 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001682{
1683 struct rb_node *node;
1684 struct extent_state *state;
1685 u64 cur_start = *start;
Lu Fengqi3522e902018-11-29 11:33:38 +08001686 bool found = false;
Chris Masond1310b22008-01-24 16:13:08 -05001687 u64 total_bytes = 0;
1688
Chris Masoncad321a2008-12-17 14:51:42 -05001689 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001690
Chris Masond1310b22008-01-24 16:13:08 -05001691 /*
1692 * this search will find all the extents that end after
1693 * our range starts.
1694 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001695 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001696 if (!node) {
Lu Fengqi3522e902018-11-29 11:33:38 +08001697 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001698 goto out;
1699 }
1700
Chris Masond3977122009-01-05 21:25:51 -05001701 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001702 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001703 if (found && (state->start != cur_start ||
1704 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001705 goto out;
1706 }
1707 if (!(state->state & EXTENT_DELALLOC)) {
1708 if (!found)
1709 *end = state->end;
1710 goto out;
1711 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001712 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001713 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001714 *cached_state = state;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +02001715 refcount_inc(&state->refs);
Josef Bacikc2a128d2010-02-02 21:19:11 +00001716 }
Lu Fengqi3522e902018-11-29 11:33:38 +08001717 found = true;
Chris Masond1310b22008-01-24 16:13:08 -05001718 *end = state->end;
1719 cur_start = state->end + 1;
1720 node = rb_next(node);
Chris Masond1310b22008-01-24 16:13:08 -05001721 total_bytes += state->end - state->start + 1;
Josef Bacik7bf811a52013-10-07 22:11:09 -04001722 if (total_bytes >= max_bytes)
Josef Bacik573aeca2013-08-30 14:38:49 -04001723 break;
Josef Bacik573aeca2013-08-30 14:38:49 -04001724 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001725 break;
1726 }
1727out:
Chris Masoncad321a2008-12-17 14:51:42 -05001728 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001729 return found;
1730}
1731
Liu Boda2c7002017-02-10 16:41:05 +01001732static int __process_pages_contig(struct address_space *mapping,
1733 struct page *locked_page,
1734 pgoff_t start_index, pgoff_t end_index,
1735 unsigned long page_ops, pgoff_t *index_ret);
1736
Jeff Mahoney143bede2012-03-01 14:56:26 +01001737static noinline void __unlock_for_delalloc(struct inode *inode,
1738 struct page *locked_page,
1739 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001740{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001741 unsigned long index = start >> PAGE_SHIFT;
1742 unsigned long end_index = end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001743
Liu Bo76c00212017-02-10 16:42:14 +01001744 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001745 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001746 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001747
Liu Bo76c00212017-02-10 16:42:14 +01001748 __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1749 PAGE_UNLOCK, NULL);
Chris Masonc8b97812008-10-29 14:49:59 -04001750}
1751
1752static noinline int lock_delalloc_pages(struct inode *inode,
1753 struct page *locked_page,
1754 u64 delalloc_start,
1755 u64 delalloc_end)
1756{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001757 unsigned long index = delalloc_start >> PAGE_SHIFT;
Liu Bo76c00212017-02-10 16:42:14 +01001758 unsigned long index_ret = index;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001759 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001760 int ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001761
Liu Bo76c00212017-02-10 16:42:14 +01001762 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001763 if (index == locked_page->index && index == end_index)
1764 return 0;
1765
Liu Bo76c00212017-02-10 16:42:14 +01001766 ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1767 end_index, PAGE_LOCK, &index_ret);
1768 if (ret == -EAGAIN)
1769 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1770 (u64)index_ret << PAGE_SHIFT);
Chris Masonc8b97812008-10-29 14:49:59 -04001771 return ret;
1772}
1773
1774/*
Lu Fengqi3522e902018-11-29 11:33:38 +08001775 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1776 * more than @max_bytes. @Start and @end are used to return the range,
Chris Masonc8b97812008-10-29 14:49:59 -04001777 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001778 * Return: true if we find something
1779 * false if nothing was in the tree
Chris Masonc8b97812008-10-29 14:49:59 -04001780 */
Johannes Thumshirnce9f9672018-11-19 10:38:17 +01001781EXPORT_FOR_TESTS
Lu Fengqi3522e902018-11-29 11:33:38 +08001782noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
Josef Bacik294e30f2013-10-09 12:00:56 -04001783 struct extent_io_tree *tree,
1784 struct page *locked_page, u64 *start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03001785 u64 *end)
Chris Masonc8b97812008-10-29 14:49:59 -04001786{
Nikolay Borisov917aace2018-10-26 14:43:20 +03001787 u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001788 u64 delalloc_start;
1789 u64 delalloc_end;
Lu Fengqi3522e902018-11-29 11:33:38 +08001790 bool found;
Chris Mason9655d292009-09-02 15:22:30 -04001791 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001792 int ret;
1793 int loops = 0;
1794
1795again:
1796 /* step one, find a bunch of delalloc bytes starting at start */
1797 delalloc_start = *start;
1798 delalloc_end = 0;
1799 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001800 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001801 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001802 *start = delalloc_start;
1803 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001804 free_extent_state(cached_state);
Lu Fengqi3522e902018-11-29 11:33:38 +08001805 return false;
Chris Masonc8b97812008-10-29 14:49:59 -04001806 }
1807
1808 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001809 * start comes from the offset of locked_page. We have to lock
1810 * pages in order, so we can't process delalloc bytes before
1811 * locked_page
1812 */
Chris Masond3977122009-01-05 21:25:51 -05001813 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001814 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001815
1816 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001817 * make sure to limit the number of pages we try to lock down
Chris Masonc8b97812008-10-29 14:49:59 -04001818 */
Josef Bacik7bf811a52013-10-07 22:11:09 -04001819 if (delalloc_end + 1 - delalloc_start > max_bytes)
1820 delalloc_end = delalloc_start + max_bytes - 1;
Chris Masond3977122009-01-05 21:25:51 -05001821
Chris Masonc8b97812008-10-29 14:49:59 -04001822 /* step two, lock all the pages after the page that has start */
1823 ret = lock_delalloc_pages(inode, locked_page,
1824 delalloc_start, delalloc_end);
Nikolay Borisov9bfd61d2018-10-26 14:43:21 +03001825 ASSERT(!ret || ret == -EAGAIN);
Chris Masonc8b97812008-10-29 14:49:59 -04001826 if (ret == -EAGAIN) {
1827 /* some of the pages are gone, lets avoid looping by
1828 * shortening the size of the delalloc range we're searching
1829 */
Chris Mason9655d292009-09-02 15:22:30 -04001830 free_extent_state(cached_state);
Chris Mason7d788742014-05-21 05:49:54 -07001831 cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001832 if (!loops) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001833 max_bytes = PAGE_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001834 loops = 1;
1835 goto again;
1836 } else {
Lu Fengqi3522e902018-11-29 11:33:38 +08001837 found = false;
Chris Masonc8b97812008-10-29 14:49:59 -04001838 goto out_failed;
1839 }
1840 }
Chris Masonc8b97812008-10-29 14:49:59 -04001841
1842 /* step three, lock the state bits for the whole range */
David Sterbaff13db42015-12-03 14:30:40 +01001843 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001844
1845 /* then test to make sure it is all still delalloc */
1846 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001847 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001848 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001849 unlock_extent_cached(tree, delalloc_start, delalloc_end,
David Sterbae43bbe52017-12-12 21:43:52 +01001850 &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001851 __unlock_for_delalloc(inode, locked_page,
1852 delalloc_start, delalloc_end);
1853 cond_resched();
1854 goto again;
1855 }
Chris Mason9655d292009-09-02 15:22:30 -04001856 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001857 *start = delalloc_start;
1858 *end = delalloc_end;
1859out_failed:
1860 return found;
1861}
1862
Liu Boda2c7002017-02-10 16:41:05 +01001863static int __process_pages_contig(struct address_space *mapping,
1864 struct page *locked_page,
1865 pgoff_t start_index, pgoff_t end_index,
1866 unsigned long page_ops, pgoff_t *index_ret)
Chris Masonc8b97812008-10-29 14:49:59 -04001867{
Liu Bo873695b2017-02-02 17:49:22 -08001868 unsigned long nr_pages = end_index - start_index + 1;
Liu Boda2c7002017-02-10 16:41:05 +01001869 unsigned long pages_locked = 0;
Liu Bo873695b2017-02-02 17:49:22 -08001870 pgoff_t index = start_index;
Chris Masonc8b97812008-10-29 14:49:59 -04001871 struct page *pages[16];
Liu Bo873695b2017-02-02 17:49:22 -08001872 unsigned ret;
Liu Boda2c7002017-02-10 16:41:05 +01001873 int err = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001874 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001875
Liu Boda2c7002017-02-10 16:41:05 +01001876 if (page_ops & PAGE_LOCK) {
1877 ASSERT(page_ops == PAGE_LOCK);
1878 ASSERT(index_ret && *index_ret == start_index);
1879 }
1880
Filipe Manana704de492014-10-06 22:14:22 +01001881 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
Liu Bo873695b2017-02-02 17:49:22 -08001882 mapping_set_error(mapping, -EIO);
Filipe Manana704de492014-10-06 22:14:22 +01001883
Chris Masond3977122009-01-05 21:25:51 -05001884 while (nr_pages > 0) {
Liu Bo873695b2017-02-02 17:49:22 -08001885 ret = find_get_pages_contig(mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001886 min_t(unsigned long,
1887 nr_pages, ARRAY_SIZE(pages)), pages);
Liu Boda2c7002017-02-10 16:41:05 +01001888 if (ret == 0) {
1889 /*
1890 * Only if we're going to lock these pages,
1891 * can we find nothing at @index.
1892 */
1893 ASSERT(page_ops & PAGE_LOCK);
Liu Bo49d4a332017-03-06 18:20:56 -08001894 err = -EAGAIN;
1895 goto out;
Liu Boda2c7002017-02-10 16:41:05 +01001896 }
Chris Mason8b62b722009-09-02 16:53:46 -04001897
Liu Boda2c7002017-02-10 16:41:05 +01001898 for (i = 0; i < ret; i++) {
Josef Bacikc2790a22013-07-29 11:20:47 -04001899 if (page_ops & PAGE_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001900 SetPagePrivate2(pages[i]);
1901
Chris Masonc8b97812008-10-29 14:49:59 -04001902 if (pages[i] == locked_page) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001903 put_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001904 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001905 continue;
1906 }
Josef Bacikc2790a22013-07-29 11:20:47 -04001907 if (page_ops & PAGE_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001908 clear_page_dirty_for_io(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001909 if (page_ops & PAGE_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001910 set_page_writeback(pages[i]);
Filipe Manana704de492014-10-06 22:14:22 +01001911 if (page_ops & PAGE_SET_ERROR)
1912 SetPageError(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001913 if (page_ops & PAGE_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001914 end_page_writeback(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001915 if (page_ops & PAGE_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001916 unlock_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001917 if (page_ops & PAGE_LOCK) {
1918 lock_page(pages[i]);
1919 if (!PageDirty(pages[i]) ||
1920 pages[i]->mapping != mapping) {
1921 unlock_page(pages[i]);
1922 put_page(pages[i]);
1923 err = -EAGAIN;
1924 goto out;
1925 }
1926 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001927 put_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001928 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001929 }
1930 nr_pages -= ret;
1931 index += ret;
1932 cond_resched();
1933 }
Liu Boda2c7002017-02-10 16:41:05 +01001934out:
1935 if (err && index_ret)
1936 *index_ret = start_index + pages_locked - 1;
1937 return err;
Chris Masonc8b97812008-10-29 14:49:59 -04001938}
Chris Masonc8b97812008-10-29 14:49:59 -04001939
Liu Bo873695b2017-02-02 17:49:22 -08001940void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1941 u64 delalloc_end, struct page *locked_page,
1942 unsigned clear_bits,
1943 unsigned long page_ops)
1944{
1945 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
David Sterbaae0f1622017-10-31 16:37:52 +01001946 NULL);
Liu Bo873695b2017-02-02 17:49:22 -08001947
1948 __process_pages_contig(inode->i_mapping, locked_page,
1949 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
Liu Boda2c7002017-02-10 16:41:05 +01001950 page_ops, NULL);
Liu Bo873695b2017-02-02 17:49:22 -08001951}
1952
Chris Masond352ac62008-09-29 15:18:18 -04001953/*
1954 * count the number of bytes in the tree that have a given bit(s)
1955 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1956 * cached. The total number found is returned.
1957 */
Chris Masond1310b22008-01-24 16:13:08 -05001958u64 count_range_bits(struct extent_io_tree *tree,
1959 u64 *start, u64 search_end, u64 max_bytes,
David Sterba9ee49a042015-01-14 19:52:13 +01001960 unsigned bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05001961{
1962 struct rb_node *node;
1963 struct extent_state *state;
1964 u64 cur_start = *start;
1965 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05001966 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001967 int found = 0;
1968
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05301969 if (WARN_ON(search_end <= cur_start))
Chris Masond1310b22008-01-24 16:13:08 -05001970 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05001971
Chris Masoncad321a2008-12-17 14:51:42 -05001972 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001973 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1974 total_bytes = tree->dirty_bytes;
1975 goto out;
1976 }
1977 /*
1978 * this search will find all the extents that end after
1979 * our range starts.
1980 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001981 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001982 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001983 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001984
Chris Masond3977122009-01-05 21:25:51 -05001985 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001986 state = rb_entry(node, struct extent_state, rb_node);
1987 if (state->start > search_end)
1988 break;
Chris Masonec29ed52011-02-23 16:23:20 -05001989 if (contig && found && state->start > last + 1)
1990 break;
1991 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001992 total_bytes += min(search_end, state->end) + 1 -
1993 max(cur_start, state->start);
1994 if (total_bytes >= max_bytes)
1995 break;
1996 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04001997 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05001998 found = 1;
1999 }
Chris Masonec29ed52011-02-23 16:23:20 -05002000 last = state->end;
2001 } else if (contig && found) {
2002 break;
Chris Masond1310b22008-01-24 16:13:08 -05002003 }
2004 node = rb_next(node);
2005 if (!node)
2006 break;
2007 }
2008out:
Chris Masoncad321a2008-12-17 14:51:42 -05002009 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002010 return total_bytes;
2011}
Christoph Hellwigb2950862008-12-02 09:54:17 -05002012
Chris Masond352ac62008-09-29 15:18:18 -04002013/*
2014 * set the private field for a given byte offset in the tree. If there isn't
2015 * an extent_state there already, this does nothing.
2016 */
Arnd Bergmannf827ba92016-02-22 22:53:20 +01002017static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
David Sterba47dc1962016-02-11 13:24:13 +01002018 struct io_failure_record *failrec)
Chris Masond1310b22008-01-24 16:13:08 -05002019{
2020 struct rb_node *node;
2021 struct extent_state *state;
2022 int ret = 0;
2023
Chris Masoncad321a2008-12-17 14:51:42 -05002024 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002025 /*
2026 * this search will find all the extents that end after
2027 * our range starts.
2028 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002029 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002030 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05002031 ret = -ENOENT;
2032 goto out;
2033 }
2034 state = rb_entry(node, struct extent_state, rb_node);
2035 if (state->start != start) {
2036 ret = -ENOENT;
2037 goto out;
2038 }
David Sterba47dc1962016-02-11 13:24:13 +01002039 state->failrec = failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002040out:
Chris Masoncad321a2008-12-17 14:51:42 -05002041 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002042 return ret;
2043}
2044
Arnd Bergmannf827ba92016-02-22 22:53:20 +01002045static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
David Sterba47dc1962016-02-11 13:24:13 +01002046 struct io_failure_record **failrec)
Chris Masond1310b22008-01-24 16:13:08 -05002047{
2048 struct rb_node *node;
2049 struct extent_state *state;
2050 int ret = 0;
2051
Chris Masoncad321a2008-12-17 14:51:42 -05002052 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002053 /*
2054 * this search will find all the extents that end after
2055 * our range starts.
2056 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002057 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002058 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05002059 ret = -ENOENT;
2060 goto out;
2061 }
2062 state = rb_entry(node, struct extent_state, rb_node);
2063 if (state->start != start) {
2064 ret = -ENOENT;
2065 goto out;
2066 }
David Sterba47dc1962016-02-11 13:24:13 +01002067 *failrec = state->failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002068out:
Chris Masoncad321a2008-12-17 14:51:42 -05002069 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002070 return ret;
2071}
2072
2073/*
2074 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05002075 * If 'filled' == 1, this returns 1 only if every extent in the tree
Chris Masond1310b22008-01-24 16:13:08 -05002076 * has the bits set. Otherwise, 1 is returned if any bit in the
2077 * range is found set.
2078 */
2079int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba9ee49a042015-01-14 19:52:13 +01002080 unsigned bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05002081{
2082 struct extent_state *state = NULL;
2083 struct rb_node *node;
2084 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002085
Chris Masoncad321a2008-12-17 14:51:42 -05002086 spin_lock(&tree->lock);
Filipe Manana27a35072014-07-06 20:09:59 +01002087 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
Josef Bacikdf98b6e2011-06-20 14:53:48 -04002088 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04002089 node = &cached->rb_node;
2090 else
2091 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05002092 while (node && start <= end) {
2093 state = rb_entry(node, struct extent_state, rb_node);
2094
2095 if (filled && state->start > start) {
2096 bitset = 0;
2097 break;
2098 }
2099
2100 if (state->start > end)
2101 break;
2102
2103 if (state->state & bits) {
2104 bitset = 1;
2105 if (!filled)
2106 break;
2107 } else if (filled) {
2108 bitset = 0;
2109 break;
2110 }
Chris Mason46562ce2009-09-23 20:23:16 -04002111
2112 if (state->end == (u64)-1)
2113 break;
2114
Chris Masond1310b22008-01-24 16:13:08 -05002115 start = state->end + 1;
2116 if (start > end)
2117 break;
2118 node = rb_next(node);
2119 if (!node) {
2120 if (filled)
2121 bitset = 0;
2122 break;
2123 }
2124 }
Chris Masoncad321a2008-12-17 14:51:42 -05002125 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002126 return bitset;
2127}
Chris Masond1310b22008-01-24 16:13:08 -05002128
2129/*
2130 * helper function to set a given page up to date if all the
2131 * extents in the tree for that page are up to date
2132 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01002133static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05002134{
Miao Xie4eee4fa2012-12-21 09:17:45 +00002135 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002136 u64 end = start + PAGE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04002137 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05002138 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002139}
2140
Josef Bacik7870d082017-05-05 11:57:15 -04002141int free_io_failure(struct extent_io_tree *failure_tree,
2142 struct extent_io_tree *io_tree,
2143 struct io_failure_record *rec)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002144{
2145 int ret;
2146 int err = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002147
David Sterba47dc1962016-02-11 13:24:13 +01002148 set_state_failrec(failure_tree, rec->start, NULL);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002149 ret = clear_extent_bits(failure_tree, rec->start,
2150 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002151 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002152 if (ret)
2153 err = ret;
2154
Josef Bacik7870d082017-05-05 11:57:15 -04002155 ret = clear_extent_bits(io_tree, rec->start,
David Woodhouse53b381b2013-01-29 18:40:14 -05002156 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002157 EXTENT_DAMAGED);
David Woodhouse53b381b2013-01-29 18:40:14 -05002158 if (ret && !err)
2159 err = ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002160
2161 kfree(rec);
2162 return err;
2163}
2164
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002165/*
2166 * this bypasses the standard btrfs submit functions deliberately, as
2167 * the standard behavior is to write all copies in a raid setup. here we only
2168 * want to write the one bad copy. so we do the mapping for ourselves and issue
2169 * submit_bio directly.
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002170 * to avoid any synchronization issues, wait for the data after writing, which
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002171 * actually prevents the read that triggered the error from finishing.
2172 * currently, there can be no more than two copies of every data bit. thus,
2173 * exactly one rewrite is required.
2174 */
Josef Bacik6ec656b2017-05-05 11:57:14 -04002175int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2176 u64 length, u64 logical, struct page *page,
2177 unsigned int pg_offset, int mirror_num)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002178{
2179 struct bio *bio;
2180 struct btrfs_device *dev;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002181 u64 map_length = 0;
2182 u64 sector;
2183 struct btrfs_bio *bbio = NULL;
2184 int ret;
2185
Linus Torvalds1751e8a2017-11-27 13:05:09 -08002186 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002187 BUG_ON(!mirror_num);
2188
David Sterbac5e4c3d2017-06-12 17:29:41 +02002189 bio = btrfs_io_bio_alloc(1);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002190 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002191 map_length = length;
2192
Filipe Mananab5de8d02016-05-27 22:21:27 +01002193 /*
2194 * Avoid races with device replace and make sure our bbio has devices
2195 * associated to its stripes that don't go away while we are doing the
2196 * read repair operation.
2197 */
2198 btrfs_bio_counter_inc_blocked(fs_info);
Nikolay Borisove4ff5fb2017-07-19 10:48:42 +03002199 if (btrfs_is_parity_mirror(fs_info, logical, length)) {
Liu Boc7253282017-03-29 10:53:58 -07002200 /*
2201 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2202 * to update all raid stripes, but here we just want to correct
2203 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2204 * stripe's dev and sector.
2205 */
2206 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2207 &map_length, &bbio, 0);
2208 if (ret) {
2209 btrfs_bio_counter_dec(fs_info);
2210 bio_put(bio);
2211 return -EIO;
2212 }
2213 ASSERT(bbio->mirror_num == 1);
2214 } else {
2215 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2216 &map_length, &bbio, mirror_num);
2217 if (ret) {
2218 btrfs_bio_counter_dec(fs_info);
2219 bio_put(bio);
2220 return -EIO;
2221 }
2222 BUG_ON(mirror_num != bbio->mirror_num);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002223 }
Liu Boc7253282017-03-29 10:53:58 -07002224
2225 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002226 bio->bi_iter.bi_sector = sector;
Liu Boc7253282017-03-29 10:53:58 -07002227 dev = bbio->stripes[bbio->mirror_num - 1].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08002228 btrfs_put_bbio(bbio);
Anand Jainebbede42017-12-04 12:54:52 +08002229 if (!dev || !dev->bdev ||
2230 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
Filipe Mananab5de8d02016-05-27 22:21:27 +01002231 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002232 bio_put(bio);
2233 return -EIO;
2234 }
Christoph Hellwig74d46992017-08-23 19:10:32 +02002235 bio_set_dev(bio, dev->bdev);
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002236 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
Miao Xieffdd2012014-09-12 18:44:00 +08002237 bio_add_page(bio, page, length, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002238
Mike Christie4e49ea42016-06-05 14:31:41 -05002239 if (btrfsic_submit_bio_wait(bio)) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002240 /* try to remap that extent elsewhere? */
Filipe Mananab5de8d02016-05-27 22:21:27 +01002241 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002242 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002243 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002244 return -EIO;
2245 }
2246
David Sterbab14af3b2015-10-08 10:43:10 +02002247 btrfs_info_rl_in_rcu(fs_info,
2248 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
Josef Bacik6ec656b2017-05-05 11:57:14 -04002249 ino, start,
Miao Xie1203b682014-09-12 18:44:01 +08002250 rcu_str_deref(dev->name), sector);
Filipe Mananab5de8d02016-05-27 22:21:27 +01002251 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002252 bio_put(bio);
2253 return 0;
2254}
2255
David Sterba20a1fbf92019-03-20 11:23:44 +01002256int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num)
Josef Bacikea466792012-03-26 21:57:36 -04002257{
David Sterba20a1fbf92019-03-20 11:23:44 +01002258 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacikea466792012-03-26 21:57:36 -04002259 u64 start = eb->start;
David Sterbacc5e31a2018-03-01 18:20:27 +01002260 int i, num_pages = num_extent_pages(eb);
Chris Masond95603b2012-04-12 15:55:15 -04002261 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04002262
David Howellsbc98a422017-07-17 08:45:34 +01002263 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002264 return -EROFS;
2265
Josef Bacikea466792012-03-26 21:57:36 -04002266 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02002267 struct page *p = eb->pages[i];
Miao Xie1203b682014-09-12 18:44:01 +08002268
Josef Bacik6ec656b2017-05-05 11:57:14 -04002269 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
Miao Xie1203b682014-09-12 18:44:01 +08002270 start - page_offset(p), mirror_num);
Josef Bacikea466792012-03-26 21:57:36 -04002271 if (ret)
2272 break;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002273 start += PAGE_SIZE;
Josef Bacikea466792012-03-26 21:57:36 -04002274 }
2275
2276 return ret;
2277}
2278
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002279/*
2280 * each time an IO finishes, we do a fast check in the IO failure tree
2281 * to see if we need to process or clean up an io_failure_record
2282 */
Josef Bacik7870d082017-05-05 11:57:15 -04002283int clean_io_failure(struct btrfs_fs_info *fs_info,
2284 struct extent_io_tree *failure_tree,
2285 struct extent_io_tree *io_tree, u64 start,
2286 struct page *page, u64 ino, unsigned int pg_offset)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002287{
2288 u64 private;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002289 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002290 struct extent_state *state;
2291 int num_copies;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002292 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002293
2294 private = 0;
Josef Bacik7870d082017-05-05 11:57:15 -04002295 ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2296 EXTENT_DIRTY, 0);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002297 if (!ret)
2298 return 0;
2299
Josef Bacik7870d082017-05-05 11:57:15 -04002300 ret = get_state_failrec(failure_tree, start, &failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002301 if (ret)
2302 return 0;
2303
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002304 BUG_ON(!failrec->this_mirror);
2305
2306 if (failrec->in_validation) {
2307 /* there was no real error, just free the record */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002308 btrfs_debug(fs_info,
2309 "clean_io_failure: freeing dummy error at %llu",
2310 failrec->start);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002311 goto out;
2312 }
David Howellsbc98a422017-07-17 08:45:34 +01002313 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002314 goto out;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002315
Josef Bacik7870d082017-05-05 11:57:15 -04002316 spin_lock(&io_tree->lock);
2317 state = find_first_extent_bit_state(io_tree,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002318 failrec->start,
2319 EXTENT_LOCKED);
Josef Bacik7870d082017-05-05 11:57:15 -04002320 spin_unlock(&io_tree->lock);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002321
Miao Xie883d0de2013-07-25 19:22:35 +08002322 if (state && state->start <= failrec->start &&
2323 state->end >= failrec->start + failrec->len - 1) {
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002324 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2325 failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002326 if (num_copies > 1) {
Josef Bacik7870d082017-05-05 11:57:15 -04002327 repair_io_failure(fs_info, ino, start, failrec->len,
2328 failrec->logical, page, pg_offset,
2329 failrec->failed_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002330 }
2331 }
2332
2333out:
Josef Bacik7870d082017-05-05 11:57:15 -04002334 free_io_failure(failure_tree, io_tree, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002335
Miao Xie454ff3d2014-09-12 18:43:58 +08002336 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002337}
2338
Miao Xief6124962014-09-12 18:44:04 +08002339/*
2340 * Can be called when
2341 * - hold extent lock
2342 * - under ordered extent
2343 * - the inode is freeing
2344 */
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002345void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
Miao Xief6124962014-09-12 18:44:04 +08002346{
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002347 struct extent_io_tree *failure_tree = &inode->io_failure_tree;
Miao Xief6124962014-09-12 18:44:04 +08002348 struct io_failure_record *failrec;
2349 struct extent_state *state, *next;
2350
2351 if (RB_EMPTY_ROOT(&failure_tree->state))
2352 return;
2353
2354 spin_lock(&failure_tree->lock);
2355 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2356 while (state) {
2357 if (state->start > end)
2358 break;
2359
2360 ASSERT(state->end <= end);
2361
2362 next = next_state(state);
2363
David Sterba47dc1962016-02-11 13:24:13 +01002364 failrec = state->failrec;
Miao Xief6124962014-09-12 18:44:04 +08002365 free_extent_state(state);
2366 kfree(failrec);
2367
2368 state = next;
2369 }
2370 spin_unlock(&failure_tree->lock);
2371}
2372
Miao Xie2fe63032014-09-12 18:43:59 +08002373int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
David Sterba47dc1962016-02-11 13:24:13 +01002374 struct io_failure_record **failrec_ret)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002375{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002376 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002377 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002378 struct extent_map *em;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002379 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2380 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2381 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002382 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002383 u64 logical;
2384
David Sterba47dc1962016-02-11 13:24:13 +01002385 ret = get_state_failrec(failure_tree, start, &failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002386 if (ret) {
2387 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2388 if (!failrec)
2389 return -ENOMEM;
Miao Xie2fe63032014-09-12 18:43:59 +08002390
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002391 failrec->start = start;
2392 failrec->len = end - start + 1;
2393 failrec->this_mirror = 0;
2394 failrec->bio_flags = 0;
2395 failrec->in_validation = 0;
2396
2397 read_lock(&em_tree->lock);
2398 em = lookup_extent_mapping(em_tree, start, failrec->len);
2399 if (!em) {
2400 read_unlock(&em_tree->lock);
2401 kfree(failrec);
2402 return -EIO;
2403 }
2404
Filipe David Borba Manana68ba9902013-11-25 03:22:07 +00002405 if (em->start > start || em->start + em->len <= start) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002406 free_extent_map(em);
2407 em = NULL;
2408 }
2409 read_unlock(&em_tree->lock);
Tsutomu Itoh7a2d6a62012-10-01 03:07:15 -06002410 if (!em) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002411 kfree(failrec);
2412 return -EIO;
2413 }
Miao Xie2fe63032014-09-12 18:43:59 +08002414
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002415 logical = start - em->start;
2416 logical = em->block_start + logical;
2417 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2418 logical = em->block_start;
2419 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2420 extent_set_compress_type(&failrec->bio_flags,
2421 em->compress_type);
2422 }
Miao Xie2fe63032014-09-12 18:43:59 +08002423
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002424 btrfs_debug(fs_info,
2425 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2426 logical, start, failrec->len);
Miao Xie2fe63032014-09-12 18:43:59 +08002427
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002428 failrec->logical = logical;
2429 free_extent_map(em);
2430
2431 /* set the bits in the private failure tree */
2432 ret = set_extent_bits(failure_tree, start, end,
David Sterbaceeb0ae2016-04-26 23:54:39 +02002433 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002434 if (ret >= 0)
David Sterba47dc1962016-02-11 13:24:13 +01002435 ret = set_state_failrec(failure_tree, start, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002436 /* set the bits in the inode's tree */
2437 if (ret >= 0)
David Sterbaceeb0ae2016-04-26 23:54:39 +02002438 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002439 if (ret < 0) {
2440 kfree(failrec);
2441 return ret;
2442 }
2443 } else {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002444 btrfs_debug(fs_info,
2445 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2446 failrec->logical, failrec->start, failrec->len,
2447 failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002448 /*
2449 * when data can be on disk more than twice, add to failrec here
2450 * (e.g. with a list for failed_mirror) to make
2451 * clean_io_failure() clean all those errors at once.
2452 */
2453 }
Miao Xie2fe63032014-09-12 18:43:59 +08002454
2455 *failrec_ret = failrec;
2456
2457 return 0;
2458}
2459
Ming Leia0b60d72017-12-18 20:22:11 +08002460bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
Miao Xie2fe63032014-09-12 18:43:59 +08002461 struct io_failure_record *failrec, int failed_mirror)
2462{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002463 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002464 int num_copies;
2465
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002466 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002467 if (num_copies == 1) {
2468 /*
2469 * we only have a single copy of the data, so don't bother with
2470 * all the retry and error correction code that follows. no
2471 * matter what the error is, it is very likely to persist.
2472 */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002473 btrfs_debug(fs_info,
2474 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2475 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002476 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002477 }
2478
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002479 /*
2480 * there are two premises:
2481 * a) deliver good data to the caller
2482 * b) correct the bad sectors on disk
2483 */
Ming Leia0b60d72017-12-18 20:22:11 +08002484 if (failed_bio_pages > 1) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002485 /*
2486 * to fulfill b), we need to know the exact failing sectors, as
2487 * we don't want to rewrite any more than the failed ones. thus,
2488 * we need separate read requests for the failed bio
2489 *
2490 * if the following BUG_ON triggers, our validation request got
2491 * merged. we need separate requests for our algorithm to work.
2492 */
2493 BUG_ON(failrec->in_validation);
2494 failrec->in_validation = 1;
2495 failrec->this_mirror = failed_mirror;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002496 } else {
2497 /*
2498 * we're ready to fulfill a) and b) alongside. get a good copy
2499 * of the failed sector and if we succeed, we have setup
2500 * everything for repair_io_failure to do the rest for us.
2501 */
2502 if (failrec->in_validation) {
2503 BUG_ON(failrec->this_mirror != failed_mirror);
2504 failrec->in_validation = 0;
2505 failrec->this_mirror = 0;
2506 }
2507 failrec->failed_mirror = failed_mirror;
2508 failrec->this_mirror++;
2509 if (failrec->this_mirror == failed_mirror)
2510 failrec->this_mirror++;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002511 }
2512
Miao Xiefacc8a222013-07-25 19:22:34 +08002513 if (failrec->this_mirror > num_copies) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002514 btrfs_debug(fs_info,
2515 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2516 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002517 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002518 }
2519
Liu Boc3cfb652017-07-13 15:00:50 -07002520 return true;
Miao Xie2fe63032014-09-12 18:43:59 +08002521}
2522
2523
2524struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2525 struct io_failure_record *failrec,
2526 struct page *page, int pg_offset, int icsum,
Miao Xie8b110e32014-09-12 18:44:03 +08002527 bio_end_io_t *endio_func, void *data)
Miao Xie2fe63032014-09-12 18:43:59 +08002528{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002529 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002530 struct bio *bio;
2531 struct btrfs_io_bio *btrfs_failed_bio;
2532 struct btrfs_io_bio *btrfs_bio;
2533
David Sterbac5e4c3d2017-06-12 17:29:41 +02002534 bio = btrfs_io_bio_alloc(1);
Miao Xie2fe63032014-09-12 18:43:59 +08002535 bio->bi_end_io = endio_func;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002536 bio->bi_iter.bi_sector = failrec->logical >> 9;
Christoph Hellwig74d46992017-08-23 19:10:32 +02002537 bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002538 bio->bi_iter.bi_size = 0;
Miao Xie8b110e32014-09-12 18:44:03 +08002539 bio->bi_private = data;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002540
Miao Xiefacc8a222013-07-25 19:22:34 +08002541 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2542 if (btrfs_failed_bio->csum) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002543 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2544
2545 btrfs_bio = btrfs_io_bio(bio);
2546 btrfs_bio->csum = btrfs_bio->csum_inline;
Miao Xie2fe63032014-09-12 18:43:59 +08002547 icsum *= csum_size;
2548 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
Miao Xiefacc8a222013-07-25 19:22:34 +08002549 csum_size);
2550 }
2551
Miao Xie2fe63032014-09-12 18:43:59 +08002552 bio_add_page(bio, page, failrec->len, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002553
Miao Xie2fe63032014-09-12 18:43:59 +08002554 return bio;
2555}
2556
2557/*
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002558 * This is a generic handler for readpage errors. If other copies exist, read
2559 * those and write back good data to the failed position. Does not investigate
2560 * in remapping the failed extent elsewhere, hoping the device will be smart
2561 * enough to do this as needed
Miao Xie2fe63032014-09-12 18:43:59 +08002562 */
Miao Xie2fe63032014-09-12 18:43:59 +08002563static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2564 struct page *page, u64 start, u64 end,
2565 int failed_mirror)
2566{
2567 struct io_failure_record *failrec;
2568 struct inode *inode = page->mapping->host;
2569 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002570 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
Miao Xie2fe63032014-09-12 18:43:59 +08002571 struct bio *bio;
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002572 int read_mode = 0;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002573 blk_status_t status;
Miao Xie2fe63032014-09-12 18:43:59 +08002574 int ret;
Christoph Hellwig8a2ee442019-02-15 19:13:07 +08002575 unsigned failed_bio_pages = failed_bio->bi_iter.bi_size >> PAGE_SHIFT;
Miao Xie2fe63032014-09-12 18:43:59 +08002576
Mike Christie1f7ad752016-06-05 14:31:51 -05002577 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
Miao Xie2fe63032014-09-12 18:43:59 +08002578
2579 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2580 if (ret)
2581 return ret;
2582
Ming Leia0b60d72017-12-18 20:22:11 +08002583 if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
Liu Boc3cfb652017-07-13 15:00:50 -07002584 failed_mirror)) {
Josef Bacik7870d082017-05-05 11:57:15 -04002585 free_io_failure(failure_tree, tree, failrec);
Miao Xie2fe63032014-09-12 18:43:59 +08002586 return -EIO;
2587 }
2588
Ming Leia0b60d72017-12-18 20:22:11 +08002589 if (failed_bio_pages > 1)
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002590 read_mode |= REQ_FAILFAST_DEV;
Miao Xie2fe63032014-09-12 18:43:59 +08002591
2592 phy_offset >>= inode->i_sb->s_blocksize_bits;
2593 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2594 start - page_offset(page),
Miao Xie8b110e32014-09-12 18:44:03 +08002595 (int)phy_offset, failed_bio->bi_end_io,
2596 NULL);
David Sterbaebcc3262018-06-29 10:56:53 +02002597 bio->bi_opf = REQ_OP_READ | read_mode;
Miao Xie2fe63032014-09-12 18:43:59 +08002598
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002599 btrfs_debug(btrfs_sb(inode->i_sb),
2600 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2601 read_mode, failrec->this_mirror, failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002602
Linus Torvalds8c27cb32017-07-05 16:41:23 -07002603 status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
Nikolay Borisov50489a52019-04-10 19:46:04 +03002604 failrec->bio_flags);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002605 if (status) {
Josef Bacik7870d082017-05-05 11:57:15 -04002606 free_io_failure(failure_tree, tree, failrec);
Miao Xie6c387ab2014-09-12 18:43:57 +08002607 bio_put(bio);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002608 ret = blk_status_to_errno(status);
Miao Xie6c387ab2014-09-12 18:43:57 +08002609 }
2610
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002611 return ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002612}
2613
Chris Masond1310b22008-01-24 16:13:08 -05002614/* lots and lots of room for performance fixes in the end_bio funcs */
2615
David Sterbab5227c02015-12-03 13:08:59 +01002616void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
Jeff Mahoney87826df2012-02-15 16:23:57 +01002617{
2618 int uptodate = (err == 0);
Eric Sandeen3e2426b2014-06-12 00:39:58 -05002619 int ret = 0;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002620
Nikolay Borisovc6297322018-11-08 10:18:08 +02002621 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002622
Jeff Mahoney87826df2012-02-15 16:23:57 +01002623 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002624 ClearPageUptodate(page);
2625 SetPageError(page);
Colin Ian Kingbff5baf2017-05-09 18:14:01 +01002626 ret = err < 0 ? err : -EIO;
Liu Bo5dca6ee2014-05-12 12:47:36 +08002627 mapping_set_error(page->mapping, ret);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002628 }
Jeff Mahoney87826df2012-02-15 16:23:57 +01002629}
2630
Chris Masond1310b22008-01-24 16:13:08 -05002631/*
2632 * after a writepage IO is done, we need to:
2633 * clear the uptodate bits on error
2634 * clear the writeback bits in the extent tree for this IO
2635 * end_page_writeback if the page has no more pending IO
2636 *
2637 * Scheduling is not allowed, so the extent state tree is expected
2638 * to have one and only one object corresponding to this IO.
2639 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002640static void end_bio_extent_writepage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002641{
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002642 int error = blk_status_to_errno(bio->bi_status);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002643 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002644 u64 start;
2645 u64 end;
Ming Lei6dc4f102019-02-15 19:13:19 +08002646 struct bvec_iter_all iter_all;
Chris Masond1310b22008-01-24 16:13:08 -05002647
David Sterbac09abff2017-07-13 18:10:07 +02002648 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002649 bio_for_each_segment_all(bvec, bio, iter_all) {
Chris Masond1310b22008-01-24 16:13:08 -05002650 struct page *page = bvec->bv_page;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002651 struct inode *inode = page->mapping->host;
2652 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
David Woodhouse902b22f2008-08-20 08:51:49 -04002653
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002654 /* We always issue full-page reads, but if some block
2655 * in a page fails to read, blk_update_request() will
2656 * advance bv_offset and adjust bv_len to compensate.
2657 * Print a warning for nonzero offsets, and an error
2658 * if they don't add up to a full page. */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002659 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2660 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002661 btrfs_err(fs_info,
Frank Holtonefe120a2013-12-20 11:37:06 -05002662 "partial page write in btrfs with offset %u and length %u",
2663 bvec->bv_offset, bvec->bv_len);
2664 else
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002665 btrfs_info(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04002666 "incomplete page write in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002667 bvec->bv_offset, bvec->bv_len);
2668 }
Chris Masond1310b22008-01-24 16:13:08 -05002669
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002670 start = page_offset(page);
2671 end = start + bvec->bv_offset + bvec->bv_len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002672
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002673 end_extent_writepage(page, error, start, end);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002674 end_page_writeback(page);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002675 }
Chris Mason2b1f55b2008-09-24 11:48:04 -04002676
Chris Masond1310b22008-01-24 16:13:08 -05002677 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002678}
2679
Miao Xie883d0de2013-07-25 19:22:35 +08002680static void
2681endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2682 int uptodate)
2683{
2684 struct extent_state *cached = NULL;
2685 u64 end = start + len - 1;
2686
2687 if (uptodate && tree->track_uptodate)
2688 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
David Sterbad810a4b2017-12-07 18:52:54 +01002689 unlock_extent_cached_atomic(tree, start, end, &cached);
Miao Xie883d0de2013-07-25 19:22:35 +08002690}
2691
Chris Masond1310b22008-01-24 16:13:08 -05002692/*
2693 * after a readpage IO is done, we need to:
2694 * clear the uptodate bits on error
2695 * set the uptodate bits if things worked
2696 * set the page up to date if all extents in the tree are uptodate
2697 * clear the lock bit in the extent tree
2698 * unlock the page if there are no other extents locked for it
2699 *
2700 * Scheduling is not allowed, so the extent state tree is expected
2701 * to have one and only one object corresponding to this IO.
2702 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002703static void end_bio_extent_readpage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002704{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002705 struct bio_vec *bvec;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002706 int uptodate = !bio->bi_status;
Miao Xiefacc8a222013-07-25 19:22:34 +08002707 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
Josef Bacik7870d082017-05-05 11:57:15 -04002708 struct extent_io_tree *tree, *failure_tree;
Miao Xiefacc8a222013-07-25 19:22:34 +08002709 u64 offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002710 u64 start;
2711 u64 end;
Miao Xiefacc8a222013-07-25 19:22:34 +08002712 u64 len;
Miao Xie883d0de2013-07-25 19:22:35 +08002713 u64 extent_start = 0;
2714 u64 extent_len = 0;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002715 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002716 int ret;
Ming Lei6dc4f102019-02-15 19:13:19 +08002717 struct bvec_iter_all iter_all;
Chris Masond1310b22008-01-24 16:13:08 -05002718
David Sterbac09abff2017-07-13 18:10:07 +02002719 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002720 bio_for_each_segment_all(bvec, bio, iter_all) {
Chris Masond1310b22008-01-24 16:13:08 -05002721 struct page *page = bvec->bv_page;
Josef Bacika71754f2013-06-17 17:14:39 -04002722 struct inode *inode = page->mapping->host;
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002723 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002724 bool data_inode = btrfs_ino(BTRFS_I(inode))
2725 != BTRFS_BTREE_INODE_OBJECTID;
Arne Jansen507903b2011-04-06 10:02:20 +00002726
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002727 btrfs_debug(fs_info,
2728 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002729 (u64)bio->bi_iter.bi_sector, bio->bi_status,
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002730 io_bio->mirror_num);
Josef Bacika71754f2013-06-17 17:14:39 -04002731 tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002732 failure_tree = &BTRFS_I(inode)->io_failure_tree;
David Woodhouse902b22f2008-08-20 08:51:49 -04002733
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002734 /* We always issue full-page reads, but if some block
2735 * in a page fails to read, blk_update_request() will
2736 * advance bv_offset and adjust bv_len to compensate.
2737 * Print a warning for nonzero offsets, and an error
2738 * if they don't add up to a full page. */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002739 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2740 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002741 btrfs_err(fs_info,
2742 "partial page read in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002743 bvec->bv_offset, bvec->bv_len);
2744 else
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002745 btrfs_info(fs_info,
2746 "incomplete page read in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002747 bvec->bv_offset, bvec->bv_len);
2748 }
Chris Masond1310b22008-01-24 16:13:08 -05002749
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002750 start = page_offset(page);
2751 end = start + bvec->bv_offset + bvec->bv_len - 1;
Miao Xiefacc8a222013-07-25 19:22:34 +08002752 len = bvec->bv_len;
Chris Masond1310b22008-01-24 16:13:08 -05002753
Chris Mason9be33952013-05-17 18:30:14 -04002754 mirror = io_bio->mirror_num;
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002755 if (likely(uptodate)) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002756 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2757 page, start, end,
2758 mirror);
Stefan Behrens5ee08442012-08-27 08:30:03 -06002759 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002760 uptodate = 0;
Stefan Behrens5ee08442012-08-27 08:30:03 -06002761 else
Josef Bacik7870d082017-05-05 11:57:15 -04002762 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2763 failure_tree, tree, start,
2764 page,
2765 btrfs_ino(BTRFS_I(inode)), 0);
Chris Masond1310b22008-01-24 16:13:08 -05002766 }
Josef Bacikea466792012-03-26 21:57:36 -04002767
Miao Xief2a09da2013-07-25 19:22:33 +08002768 if (likely(uptodate))
2769 goto readpage_ok;
2770
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002771 if (data_inode) {
Liu Bo9d0d1c82017-03-24 15:04:50 -07002772
2773 /*
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002774 * The generic bio_readpage_error handles errors the
2775 * following way: If possible, new read requests are
2776 * created and submitted and will end up in
2777 * end_bio_extent_readpage as well (if we're lucky,
2778 * not in the !uptodate case). In that case it returns
2779 * 0 and we just go on with the next page in our bio.
2780 * If it can't handle the error it will return -EIO and
2781 * we remain responsible for that page.
Liu Bo9d0d1c82017-03-24 15:04:50 -07002782 */
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002783 ret = bio_readpage_error(bio, offset, page, start, end,
2784 mirror);
2785 if (ret == 0) {
2786 uptodate = !bio->bi_status;
2787 offset += len;
2788 continue;
2789 }
2790 } else {
2791 struct extent_buffer *eb;
2792
2793 eb = (struct extent_buffer *)page->private;
2794 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2795 eb->read_mirror = mirror;
2796 atomic_dec(&eb->io_pages);
2797 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2798 &eb->bflags))
2799 btree_readahead_hook(eb, -EIO);
Chris Mason7e383262008-04-09 16:28:12 -04002800 }
Miao Xief2a09da2013-07-25 19:22:33 +08002801readpage_ok:
Miao Xie883d0de2013-07-25 19:22:35 +08002802 if (likely(uptodate)) {
Josef Bacika71754f2013-06-17 17:14:39 -04002803 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002804 pgoff_t end_index = i_size >> PAGE_SHIFT;
Liu Boa583c022014-08-19 23:32:22 +08002805 unsigned off;
Josef Bacika71754f2013-06-17 17:14:39 -04002806
2807 /* Zero out the end if this page straddles i_size */
Johannes Thumshirn70730172018-12-05 15:23:03 +01002808 off = offset_in_page(i_size);
Liu Boa583c022014-08-19 23:32:22 +08002809 if (page->index == end_index && off)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002810 zero_user_segment(page, off, PAGE_SIZE);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002811 SetPageUptodate(page);
Chris Mason70dec802008-01-29 09:59:12 -05002812 } else {
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002813 ClearPageUptodate(page);
2814 SetPageError(page);
Chris Mason70dec802008-01-29 09:59:12 -05002815 }
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002816 unlock_page(page);
Miao Xiefacc8a222013-07-25 19:22:34 +08002817 offset += len;
Miao Xie883d0de2013-07-25 19:22:35 +08002818
2819 if (unlikely(!uptodate)) {
2820 if (extent_len) {
2821 endio_readpage_release_extent(tree,
2822 extent_start,
2823 extent_len, 1);
2824 extent_start = 0;
2825 extent_len = 0;
2826 }
2827 endio_readpage_release_extent(tree, start,
2828 end - start + 1, 0);
2829 } else if (!extent_len) {
2830 extent_start = start;
2831 extent_len = end + 1 - start;
2832 } else if (extent_start + extent_len == start) {
2833 extent_len += end + 1 - start;
2834 } else {
2835 endio_readpage_release_extent(tree, extent_start,
2836 extent_len, uptodate);
2837 extent_start = start;
2838 extent_len = end + 1 - start;
2839 }
Kent Overstreet2c30c712013-11-07 12:20:26 -08002840 }
Chris Masond1310b22008-01-24 16:13:08 -05002841
Miao Xie883d0de2013-07-25 19:22:35 +08002842 if (extent_len)
2843 endio_readpage_release_extent(tree, extent_start, extent_len,
2844 uptodate);
David Sterbab3a0dd52018-11-22 17:16:49 +01002845 btrfs_io_bio_free_csum(io_bio);
Chris Masond1310b22008-01-24 16:13:08 -05002846 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002847}
2848
Chris Mason9be33952013-05-17 18:30:14 -04002849/*
David Sterba184f9992017-06-12 17:29:39 +02002850 * Initialize the members up to but not including 'bio'. Use after allocating a
2851 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
2852 * 'bio' because use of __GFP_ZERO is not supported.
Chris Mason9be33952013-05-17 18:30:14 -04002853 */
David Sterba184f9992017-06-12 17:29:39 +02002854static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
Chris Masond1310b22008-01-24 16:13:08 -05002855{
David Sterba184f9992017-06-12 17:29:39 +02002856 memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2857}
2858
2859/*
David Sterba6e707bc2017-06-02 17:26:26 +02002860 * The following helpers allocate a bio. As it's backed by a bioset, it'll
2861 * never fail. We're returning a bio right now but you can call btrfs_io_bio
2862 * for the appropriate container_of magic
Chris Masond1310b22008-01-24 16:13:08 -05002863 */
David Sterbac821e7f32017-06-02 18:35:36 +02002864struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
Chris Masond1310b22008-01-24 16:13:08 -05002865{
2866 struct bio *bio;
2867
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04002868 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
Christoph Hellwig74d46992017-08-23 19:10:32 +02002869 bio_set_dev(bio, bdev);
David Sterbac821e7f32017-06-02 18:35:36 +02002870 bio->bi_iter.bi_sector = first_byte >> 9;
David Sterba184f9992017-06-12 17:29:39 +02002871 btrfs_io_bio_init(btrfs_io_bio(bio));
Chris Masond1310b22008-01-24 16:13:08 -05002872 return bio;
2873}
2874
David Sterba8b6c1d52017-06-02 17:48:13 +02002875struct bio *btrfs_bio_clone(struct bio *bio)
Chris Mason9be33952013-05-17 18:30:14 -04002876{
Miao Xie23ea8e52014-09-12 18:43:54 +08002877 struct btrfs_io_bio *btrfs_bio;
2878 struct bio *new;
Chris Mason9be33952013-05-17 18:30:14 -04002879
David Sterba6e707bc2017-06-02 17:26:26 +02002880 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04002881 new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
David Sterba6e707bc2017-06-02 17:26:26 +02002882 btrfs_bio = btrfs_io_bio(new);
David Sterba184f9992017-06-12 17:29:39 +02002883 btrfs_io_bio_init(btrfs_bio);
David Sterba6e707bc2017-06-02 17:26:26 +02002884 btrfs_bio->iter = bio->bi_iter;
Miao Xie23ea8e52014-09-12 18:43:54 +08002885 return new;
2886}
Chris Mason9be33952013-05-17 18:30:14 -04002887
David Sterbac5e4c3d2017-06-12 17:29:41 +02002888struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
Chris Mason9be33952013-05-17 18:30:14 -04002889{
Miao Xiefacc8a222013-07-25 19:22:34 +08002890 struct bio *bio;
2891
David Sterba6e707bc2017-06-02 17:26:26 +02002892 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04002893 bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
David Sterba184f9992017-06-12 17:29:39 +02002894 btrfs_io_bio_init(btrfs_io_bio(bio));
Miao Xiefacc8a222013-07-25 19:22:34 +08002895 return bio;
Chris Mason9be33952013-05-17 18:30:14 -04002896}
2897
Liu Boe4770942017-05-16 10:57:14 -07002898struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
Liu Bo2f8e9142017-05-15 17:43:31 -07002899{
2900 struct bio *bio;
2901 struct btrfs_io_bio *btrfs_bio;
2902
2903 /* this will never fail when it's backed by a bioset */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04002904 bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
Liu Bo2f8e9142017-05-15 17:43:31 -07002905 ASSERT(bio);
2906
2907 btrfs_bio = btrfs_io_bio(bio);
David Sterba184f9992017-06-12 17:29:39 +02002908 btrfs_io_bio_init(btrfs_bio);
Liu Bo2f8e9142017-05-15 17:43:31 -07002909
2910 bio_trim(bio, offset >> 9, size >> 9);
Liu Bo17347ce2017-05-15 15:33:27 -07002911 btrfs_bio->iter = bio->bi_iter;
Liu Bo2f8e9142017-05-15 17:43:31 -07002912 return bio;
2913}
Chris Mason9be33952013-05-17 18:30:14 -04002914
David Sterba4b81ba42017-06-06 19:14:26 +02002915/*
2916 * @opf: bio REQ_OP_* and REQ_* flags as one value
David Sterbab8b3d622017-06-12 19:50:41 +02002917 * @tree: tree so we can call our merge_bio hook
2918 * @wbc: optional writeback control for io accounting
2919 * @page: page to add to the bio
2920 * @pg_offset: offset of the new bio or to check whether we are adding
2921 * a contiguous page to the previous one
2922 * @size: portion of page that we want to write
2923 * @offset: starting offset in the page
2924 * @bdev: attach newly created bios to this bdev
David Sterba5c2b1fd2017-06-06 19:22:55 +02002925 * @bio_ret: must be valid pointer, newly allocated bio will be stored there
David Sterbab8b3d622017-06-12 19:50:41 +02002926 * @end_io_func: end_io callback for new bio
2927 * @mirror_num: desired mirror to read/write
2928 * @prev_bio_flags: flags of previous bio to see if we can merge the current one
2929 * @bio_flags: flags of the current bio to see if we can merge them
David Sterba4b81ba42017-06-06 19:14:26 +02002930 */
2931static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
Chris Masonda2f0f72015-07-02 13:57:22 -07002932 struct writeback_control *wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02002933 struct page *page, u64 offset,
David Sterba6c5a4e22017-10-04 17:10:34 +02002934 size_t size, unsigned long pg_offset,
Chris Masond1310b22008-01-24 16:13:08 -05002935 struct block_device *bdev,
2936 struct bio **bio_ret,
Chris Masonf1885912008-04-09 16:28:12 -04002937 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04002938 int mirror_num,
2939 unsigned long prev_bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01002940 unsigned long bio_flags,
2941 bool force_bio_submit)
Chris Masond1310b22008-01-24 16:13:08 -05002942{
2943 int ret = 0;
2944 struct bio *bio;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002945 size_t page_size = min_t(size_t, size, PAGE_SIZE);
David Sterba6273b7f2017-10-04 17:30:11 +02002946 sector_t sector = offset >> 9;
Chris Masond1310b22008-01-24 16:13:08 -05002947
David Sterba5c2b1fd2017-06-06 19:22:55 +02002948 ASSERT(bio_ret);
2949
2950 if (*bio_ret) {
David Sterba0c8508a2017-06-12 20:00:43 +02002951 bool contig;
2952 bool can_merge = true;
2953
Chris Masond1310b22008-01-24 16:13:08 -05002954 bio = *bio_ret;
David Sterba0c8508a2017-06-12 20:00:43 +02002955 if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
Kent Overstreet4f024f32013-10-11 15:44:27 -07002956 contig = bio->bi_iter.bi_sector == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002957 else
Kent Overstreetf73a1c72012-09-25 15:05:12 -07002958 contig = bio_end_sector(bio) == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002959
Nikolay Borisovda12fe52018-11-27 20:57:58 +02002960 ASSERT(tree->ops);
2961 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
David Sterba0c8508a2017-06-12 20:00:43 +02002962 can_merge = false;
2963
2964 if (prev_bio_flags != bio_flags || !contig || !can_merge ||
Filipe Manana005efed2015-09-14 09:09:31 +01002965 force_bio_submit ||
David Sterba6c5a4e22017-10-04 17:10:34 +02002966 bio_add_page(bio, page, page_size, pg_offset) < page_size) {
Mike Christie1f7ad752016-06-05 14:31:51 -05002967 ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
Naohiro Aota289454a2015-01-06 01:01:03 +09002968 if (ret < 0) {
2969 *bio_ret = NULL;
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002970 return ret;
Naohiro Aota289454a2015-01-06 01:01:03 +09002971 }
Chris Masond1310b22008-01-24 16:13:08 -05002972 bio = NULL;
2973 } else {
Chris Masonda2f0f72015-07-02 13:57:22 -07002974 if (wbc)
2975 wbc_account_io(wbc, page, page_size);
Chris Masond1310b22008-01-24 16:13:08 -05002976 return 0;
2977 }
2978 }
Chris Masonc8b97812008-10-29 14:49:59 -04002979
David Sterba6273b7f2017-10-04 17:30:11 +02002980 bio = btrfs_bio_alloc(bdev, offset);
David Sterba6c5a4e22017-10-04 17:10:34 +02002981 bio_add_page(bio, page, page_size, pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05002982 bio->bi_end_io = end_io_func;
2983 bio->bi_private = tree;
Jens Axboee6959b92017-06-27 11:51:28 -06002984 bio->bi_write_hint = page->mapping->host->i_write_hint;
David Sterba4b81ba42017-06-06 19:14:26 +02002985 bio->bi_opf = opf;
Chris Masonda2f0f72015-07-02 13:57:22 -07002986 if (wbc) {
2987 wbc_init_bio(wbc, bio);
2988 wbc_account_io(wbc, page, page_size);
2989 }
Chris Mason70dec802008-01-29 09:59:12 -05002990
David Sterba5c2b1fd2017-06-06 19:22:55 +02002991 *bio_ret = bio;
Chris Masond1310b22008-01-24 16:13:08 -05002992
2993 return ret;
2994}
2995
Eric Sandeen48a3b632013-04-25 20:41:01 +00002996static void attach_extent_buffer_page(struct extent_buffer *eb,
2997 struct page *page)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05002998{
2999 if (!PagePrivate(page)) {
3000 SetPagePrivate(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003001 get_page(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003002 set_page_private(page, (unsigned long)eb);
3003 } else {
3004 WARN_ON(page->private != (unsigned long)eb);
3005 }
3006}
3007
Chris Masond1310b22008-01-24 16:13:08 -05003008void set_page_extent_mapped(struct page *page)
3009{
3010 if (!PagePrivate(page)) {
3011 SetPagePrivate(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003012 get_page(page);
Chris Mason6af118ce2008-07-22 11:18:07 -04003013 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05003014 }
3015}
3016
Miao Xie125bac012013-07-25 19:22:37 +08003017static struct extent_map *
3018__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3019 u64 start, u64 len, get_extent_t *get_extent,
3020 struct extent_map **em_cached)
3021{
3022 struct extent_map *em;
3023
3024 if (em_cached && *em_cached) {
3025 em = *em_cached;
Filipe Mananacbc0e922014-02-25 14:15:12 +00003026 if (extent_map_in_tree(em) && start >= em->start &&
Miao Xie125bac012013-07-25 19:22:37 +08003027 start < extent_map_end(em)) {
Elena Reshetova490b54d2017-03-03 10:55:12 +02003028 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003029 return em;
3030 }
3031
3032 free_extent_map(em);
3033 *em_cached = NULL;
3034 }
3035
Nikolay Borisovfc4f21b12017-02-20 13:51:06 +02003036 em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
Miao Xie125bac012013-07-25 19:22:37 +08003037 if (em_cached && !IS_ERR_OR_NULL(em)) {
3038 BUG_ON(*em_cached);
Elena Reshetova490b54d2017-03-03 10:55:12 +02003039 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003040 *em_cached = em;
3041 }
3042 return em;
3043}
Chris Masond1310b22008-01-24 16:13:08 -05003044/*
3045 * basic readpage implementation. Locked extent state structs are inserted
3046 * into the tree that are removed when the IO is done (by the end_io
3047 * handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003048 * XXX JDM: This needs looking at to ensure proper page locking
Liu Bobaf863b2016-07-11 10:39:07 -07003049 * return 0 on success, otherwise return error
Chris Masond1310b22008-01-24 16:13:08 -05003050 */
Miao Xie99740902013-07-25 19:22:36 +08003051static int __do_readpage(struct extent_io_tree *tree,
3052 struct page *page,
3053 get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08003054 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08003055 struct bio **bio, int mirror_num,
David Sterbaf1c77c52017-06-06 19:03:49 +02003056 unsigned long *bio_flags, unsigned int read_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003057 u64 *prev_em_start)
Chris Masond1310b22008-01-24 16:13:08 -05003058{
3059 struct inode *inode = page->mapping->host;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003060 u64 start = page_offset(page);
David Sterba8eec8292017-06-06 19:50:13 +02003061 const u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003062 u64 cur = start;
3063 u64 extent_offset;
3064 u64 last_byte = i_size_read(inode);
3065 u64 block_start;
3066 u64 cur_end;
Chris Masond1310b22008-01-24 16:13:08 -05003067 struct extent_map *em;
3068 struct block_device *bdev;
Liu Bobaf863b2016-07-11 10:39:07 -07003069 int ret = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003070 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02003071 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003072 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04003073 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05003074 size_t blocksize = inode->i_sb->s_blocksize;
Filipe Manana7f042a82016-01-27 19:17:20 +00003075 unsigned long this_bio_flag = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003076
3077 set_page_extent_mapped(page);
3078
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003079 if (!PageUptodate(page)) {
3080 if (cleancache_get_page(page) == 0) {
3081 BUG_ON(blocksize != PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08003082 unlock_extent(tree, start, end);
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003083 goto out;
3084 }
3085 }
3086
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003087 if (page->index == last_byte >> PAGE_SHIFT) {
Chris Masonc8b97812008-10-29 14:49:59 -04003088 char *userpage;
Johannes Thumshirn70730172018-12-05 15:23:03 +01003089 size_t zero_offset = offset_in_page(last_byte);
Chris Masonc8b97812008-10-29 14:49:59 -04003090
3091 if (zero_offset) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003092 iosize = PAGE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08003093 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04003094 memset(userpage + zero_offset, 0, iosize);
3095 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003096 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04003097 }
3098 }
Chris Masond1310b22008-01-24 16:13:08 -05003099 while (cur <= end) {
Filipe Manana005efed2015-09-14 09:09:31 +01003100 bool force_bio_submit = false;
David Sterba6273b7f2017-10-04 17:30:11 +02003101 u64 offset;
Josef Bacikc8f2f242013-02-11 11:33:00 -05003102
Chris Masond1310b22008-01-24 16:13:08 -05003103 if (cur >= last_byte) {
3104 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003105 struct extent_state *cached = NULL;
3106
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003107 iosize = PAGE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08003108 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003109 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003110 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003111 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003112 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003113 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003114 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003115 cur + iosize - 1, &cached);
Chris Masond1310b22008-01-24 16:13:08 -05003116 break;
3117 }
Miao Xie125bac012013-07-25 19:22:37 +08003118 em = __get_extent_map(inode, page, pg_offset, cur,
3119 end - cur + 1, get_extent, em_cached);
David Sterbac7040052011-04-19 18:00:01 +02003120 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003121 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003122 unlock_extent(tree, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003123 break;
3124 }
Chris Masond1310b22008-01-24 16:13:08 -05003125 extent_offset = cur - em->start;
3126 BUG_ON(extent_map_end(em) <= cur);
3127 BUG_ON(end < cur);
3128
Li Zefan261507a02010-12-17 14:21:50 +08003129 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Mark Fasheh4b384312013-08-06 11:42:50 -07003130 this_bio_flag |= EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08003131 extent_set_compress_type(&this_bio_flag,
3132 em->compress_type);
3133 }
Chris Masonc8b97812008-10-29 14:49:59 -04003134
Chris Masond1310b22008-01-24 16:13:08 -05003135 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3136 cur_end = min(extent_map_end(em) - 1, end);
Qu Wenruofda28322013-02-26 08:10:22 +00003137 iosize = ALIGN(iosize, blocksize);
Chris Masonc8b97812008-10-29 14:49:59 -04003138 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3139 disk_io_size = em->block_len;
David Sterba6273b7f2017-10-04 17:30:11 +02003140 offset = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003141 } else {
David Sterba6273b7f2017-10-04 17:30:11 +02003142 offset = em->block_start + extent_offset;
Chris Masonc8b97812008-10-29 14:49:59 -04003143 disk_io_size = iosize;
3144 }
Chris Masond1310b22008-01-24 16:13:08 -05003145 bdev = em->bdev;
3146 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04003147 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3148 block_start = EXTENT_MAP_HOLE;
Filipe Manana005efed2015-09-14 09:09:31 +01003149
3150 /*
3151 * If we have a file range that points to a compressed extent
3152 * and it's followed by a consecutive file range that points to
3153 * to the same compressed extent (possibly with a different
3154 * offset and/or length, so it either points to the whole extent
3155 * or only part of it), we must make sure we do not submit a
3156 * single bio to populate the pages for the 2 ranges because
3157 * this makes the compressed extent read zero out the pages
3158 * belonging to the 2nd range. Imagine the following scenario:
3159 *
3160 * File layout
3161 * [0 - 8K] [8K - 24K]
3162 * | |
3163 * | |
3164 * points to extent X, points to extent X,
3165 * offset 4K, length of 8K offset 0, length 16K
3166 *
3167 * [extent X, compressed length = 4K uncompressed length = 16K]
3168 *
3169 * If the bio to read the compressed extent covers both ranges,
3170 * it will decompress extent X into the pages belonging to the
3171 * first range and then it will stop, zeroing out the remaining
3172 * pages that belong to the other range that points to extent X.
3173 * So here we make sure we submit 2 bios, one for the first
3174 * range and another one for the third range. Both will target
3175 * the same physical extent from disk, but we can't currently
3176 * make the compressed bio endio callback populate the pages
3177 * for both ranges because each compressed bio is tightly
3178 * coupled with a single extent map, and each range can have
3179 * an extent map with a different offset value relative to the
3180 * uncompressed data of our extent and different lengths. This
3181 * is a corner case so we prioritize correctness over
3182 * non-optimal behavior (submitting 2 bios for the same extent).
3183 */
3184 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3185 prev_em_start && *prev_em_start != (u64)-1 &&
Filipe Manana8e928212019-02-14 15:17:20 +00003186 *prev_em_start != em->start)
Filipe Manana005efed2015-09-14 09:09:31 +01003187 force_bio_submit = true;
3188
3189 if (prev_em_start)
Filipe Manana8e928212019-02-14 15:17:20 +00003190 *prev_em_start = em->start;
Filipe Manana005efed2015-09-14 09:09:31 +01003191
Chris Masond1310b22008-01-24 16:13:08 -05003192 free_extent_map(em);
3193 em = NULL;
3194
3195 /* we've found a hole, just zero and go on */
3196 if (block_start == EXTENT_MAP_HOLE) {
3197 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003198 struct extent_state *cached = NULL;
3199
Cong Wang7ac687d2011-11-25 23:14:28 +08003200 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003201 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003202 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003203 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003204
3205 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003206 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003207 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003208 cur + iosize - 1, &cached);
Chris Masond1310b22008-01-24 16:13:08 -05003209 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003210 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003211 continue;
3212 }
3213 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04003214 if (test_range_bit(tree, cur, cur_end,
3215 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04003216 check_page_uptodate(tree, page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003217 unlock_extent(tree, cur, cur + iosize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05003218 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003219 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003220 continue;
3221 }
Chris Mason70dec802008-01-29 09:59:12 -05003222 /* we have an inline extent but it didn't get marked up
3223 * to date. Error out
3224 */
3225 if (block_start == EXTENT_MAP_INLINE) {
3226 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003227 unlock_extent(tree, cur, cur + iosize - 1);
Chris Mason70dec802008-01-29 09:59:12 -05003228 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003229 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05003230 continue;
3231 }
Chris Masond1310b22008-01-24 16:13:08 -05003232
David Sterba4b81ba42017-06-06 19:14:26 +02003233 ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
David Sterba6273b7f2017-10-04 17:30:11 +02003234 page, offset, disk_io_size,
3235 pg_offset, bdev, bio,
Chris Masonc8b97812008-10-29 14:49:59 -04003236 end_bio_extent_readpage, mirror_num,
3237 *bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003238 this_bio_flag,
3239 force_bio_submit);
Josef Bacikc8f2f242013-02-11 11:33:00 -05003240 if (!ret) {
3241 nr++;
3242 *bio_flags = this_bio_flag;
3243 } else {
Chris Masond1310b22008-01-24 16:13:08 -05003244 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003245 unlock_extent(tree, cur, cur + iosize - 1);
Liu Bobaf863b2016-07-11 10:39:07 -07003246 goto out;
Josef Bacikedd33c92012-10-05 16:40:32 -04003247 }
Chris Masond1310b22008-01-24 16:13:08 -05003248 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003249 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003250 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003251out:
Chris Masond1310b22008-01-24 16:13:08 -05003252 if (!nr) {
3253 if (!PageError(page))
3254 SetPageUptodate(page);
3255 unlock_page(page);
3256 }
Liu Bobaf863b2016-07-11 10:39:07 -07003257 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003258}
3259
Nikolay Borisove65ef212019-03-11 09:55:38 +02003260static inline void contiguous_readpages(struct extent_io_tree *tree,
Miao Xie99740902013-07-25 19:22:36 +08003261 struct page *pages[], int nr_pages,
3262 u64 start, u64 end,
Miao Xie125bac012013-07-25 19:22:37 +08003263 struct extent_map **em_cached,
Nikolay Borisovd3fac6b2017-10-24 11:50:39 +03003264 struct bio **bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05003265 unsigned long *bio_flags,
Filipe Manana808f80b2015-09-28 09:56:26 +01003266 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003267{
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003268 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
Miao Xie99740902013-07-25 19:22:36 +08003269 int index;
3270
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003271 btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003272
3273 for (index = 0; index < nr_pages; index++) {
David Sterba4ef77692017-06-23 04:09:57 +02003274 __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
Jens Axboe5e9d3982018-08-17 15:45:39 -07003275 bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003276 put_page(pages[index]);
Miao Xie99740902013-07-25 19:22:36 +08003277 }
3278}
3279
Miao Xie99740902013-07-25 19:22:36 +08003280static int __extent_read_full_page(struct extent_io_tree *tree,
3281 struct page *page,
3282 get_extent_t *get_extent,
3283 struct bio **bio, int mirror_num,
David Sterbaf1c77c52017-06-06 19:03:49 +02003284 unsigned long *bio_flags,
3285 unsigned int read_flags)
Miao Xie99740902013-07-25 19:22:36 +08003286{
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003287 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
Miao Xie99740902013-07-25 19:22:36 +08003288 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003289 u64 end = start + PAGE_SIZE - 1;
Miao Xie99740902013-07-25 19:22:36 +08003290 int ret;
3291
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003292 btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003293
Miao Xie125bac012013-07-25 19:22:37 +08003294 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
Mike Christie1f7ad752016-06-05 14:31:51 -05003295 bio_flags, read_flags, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003296 return ret;
3297}
3298
Chris Masond1310b22008-01-24 16:13:08 -05003299int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003300 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05003301{
3302 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04003303 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003304 int ret;
3305
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003306 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
Mike Christie1f7ad752016-06-05 14:31:51 -05003307 &bio_flags, 0);
Chris Masond1310b22008-01-24 16:13:08 -05003308 if (bio)
Mike Christie1f7ad752016-06-05 14:31:51 -05003309 ret = submit_one_bio(bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003310 return ret;
3311}
Chris Masond1310b22008-01-24 16:13:08 -05003312
David Sterba3d4b9492017-02-10 19:33:41 +01003313static void update_nr_written(struct writeback_control *wbc,
Liu Boa91326672016-03-07 16:56:21 -08003314 unsigned long nr_written)
Chris Mason11c83492009-04-20 15:50:09 -04003315{
3316 wbc->nr_to_write -= nr_written;
Chris Mason11c83492009-04-20 15:50:09 -04003317}
3318
Chris Masond1310b22008-01-24 16:13:08 -05003319/*
Chris Mason40f76582014-05-21 13:35:51 -07003320 * helper for __extent_writepage, doing all of the delayed allocation setup.
3321 *
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003322 * This returns 1 if btrfs_run_delalloc_range function did all the work required
Chris Mason40f76582014-05-21 13:35:51 -07003323 * to write the page (copy into inline extent). In this case the IO has
3324 * been started and the page is already unlocked.
3325 *
3326 * This returns 0 if all went well (page still locked)
3327 * This returns < 0 if there were errors (page still locked)
Chris Masond1310b22008-01-24 16:13:08 -05003328 */
Chris Mason40f76582014-05-21 13:35:51 -07003329static noinline_for_stack int writepage_delalloc(struct inode *inode,
Nikolay Borisov8cc02372018-11-08 10:18:07 +02003330 struct page *page, struct writeback_control *wbc,
3331 u64 delalloc_start, unsigned long *nr_written)
Chris Masond1310b22008-01-24 16:13:08 -05003332{
Nikolay Borisov8cc02372018-11-08 10:18:07 +02003333 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003334 u64 page_end = delalloc_start + PAGE_SIZE - 1;
Lu Fengqi3522e902018-11-29 11:33:38 +08003335 bool found;
Chris Mason40f76582014-05-21 13:35:51 -07003336 u64 delalloc_to_write = 0;
3337 u64 delalloc_end = 0;
3338 int ret;
3339 int page_started = 0;
3340
Chris Mason40f76582014-05-21 13:35:51 -07003341
3342 while (delalloc_end < page_end) {
Lu Fengqi3522e902018-11-29 11:33:38 +08003343 found = find_lock_delalloc_range(inode, tree,
Chris Mason40f76582014-05-21 13:35:51 -07003344 page,
3345 &delalloc_start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03003346 &delalloc_end);
Lu Fengqi3522e902018-11-29 11:33:38 +08003347 if (!found) {
Chris Mason40f76582014-05-21 13:35:51 -07003348 delalloc_start = delalloc_end + 1;
3349 continue;
3350 }
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003351 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3352 delalloc_end, &page_started, nr_written, wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003353 if (ret) {
3354 SetPageError(page);
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003355 /*
3356 * btrfs_run_delalloc_range should return < 0 for error
3357 * but just in case, we use > 0 here meaning the IO is
3358 * started, so we don't want to return > 0 unless
3359 * things are going well.
Chris Mason40f76582014-05-21 13:35:51 -07003360 */
3361 ret = ret < 0 ? ret : -EIO;
3362 goto done;
3363 }
3364 /*
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003365 * delalloc_end is already one less than the total length, so
3366 * we don't subtract one from PAGE_SIZE
Chris Mason40f76582014-05-21 13:35:51 -07003367 */
3368 delalloc_to_write += (delalloc_end - delalloc_start +
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003369 PAGE_SIZE) >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003370 delalloc_start = delalloc_end + 1;
3371 }
3372 if (wbc->nr_to_write < delalloc_to_write) {
3373 int thresh = 8192;
3374
3375 if (delalloc_to_write < thresh * 2)
3376 thresh = delalloc_to_write;
3377 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3378 thresh);
3379 }
3380
3381 /* did the fill delalloc function already unlock and start
3382 * the IO?
3383 */
3384 if (page_started) {
3385 /*
3386 * we've unlocked the page, so we can't update
3387 * the mapping's writeback index, just update
3388 * nr_to_write.
3389 */
3390 wbc->nr_to_write -= *nr_written;
3391 return 1;
3392 }
3393
3394 ret = 0;
3395
3396done:
3397 return ret;
3398}
3399
3400/*
3401 * helper for __extent_writepage. This calls the writepage start hooks,
3402 * and does the loop to map the page into extents and bios.
3403 *
3404 * We return 1 if the IO is started and the page is unlocked,
3405 * 0 if all went well (page still locked)
3406 * < 0 if there were errors (page still locked)
3407 */
3408static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3409 struct page *page,
3410 struct writeback_control *wbc,
3411 struct extent_page_data *epd,
3412 loff_t i_size,
3413 unsigned long nr_written,
David Sterbaf1c77c52017-06-06 19:03:49 +02003414 unsigned int write_flags, int *nr_ret)
Chris Mason40f76582014-05-21 13:35:51 -07003415{
Chris Masond1310b22008-01-24 16:13:08 -05003416 struct extent_io_tree *tree = epd->tree;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003417 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003418 u64 page_end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003419 u64 end;
3420 u64 cur = start;
3421 u64 extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003422 u64 block_start;
3423 u64 iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003424 struct extent_map *em;
3425 struct block_device *bdev;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003426 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003427 size_t blocksize;
Chris Mason40f76582014-05-21 13:35:51 -07003428 int ret = 0;
3429 int nr = 0;
3430 bool compressed;
Chris Masond1310b22008-01-24 16:13:08 -05003431
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003432 ret = btrfs_writepage_cow_fixup(page, start, page_end);
3433 if (ret) {
3434 /* Fixup worker will requeue */
3435 if (ret == -EBUSY)
3436 wbc->pages_skipped++;
3437 else
3438 redirty_page_for_writepage(wbc, page);
Chris Mason40f76582014-05-21 13:35:51 -07003439
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003440 update_nr_written(wbc, nr_written);
3441 unlock_page(page);
3442 return 1;
Chris Mason247e7432008-07-17 12:53:51 -04003443 }
3444
Chris Mason11c83492009-04-20 15:50:09 -04003445 /*
3446 * we don't want to touch the inode after unlocking the page,
3447 * so we update the mapping writeback index now
3448 */
David Sterba3d4b9492017-02-10 19:33:41 +01003449 update_nr_written(wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05003450
Chris Masond1310b22008-01-24 16:13:08 -05003451 end = page_end;
Chris Mason40f76582014-05-21 13:35:51 -07003452 if (i_size <= start) {
Nikolay Borisovc6297322018-11-08 10:18:08 +02003453 btrfs_writepage_endio_finish_ordered(page, start, page_end, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003454 goto done;
3455 }
3456
Chris Masond1310b22008-01-24 16:13:08 -05003457 blocksize = inode->i_sb->s_blocksize;
3458
3459 while (cur <= end) {
Chris Mason40f76582014-05-21 13:35:51 -07003460 u64 em_end;
David Sterba6273b7f2017-10-04 17:30:11 +02003461 u64 offset;
David Sterba58409ed2016-05-04 11:46:10 +02003462
Chris Mason40f76582014-05-21 13:35:51 -07003463 if (cur >= i_size) {
Nikolay Borisov7087a9d2018-11-01 14:09:48 +02003464 btrfs_writepage_endio_finish_ordered(page, cur,
Nikolay Borisovc6297322018-11-08 10:18:08 +02003465 page_end, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003466 break;
3467 }
David Sterba3c98c622017-06-23 04:01:08 +02003468 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05003469 end - cur + 1, 1);
David Sterbac7040052011-04-19 18:00:01 +02003470 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003471 SetPageError(page);
Filipe Manana61391d52014-05-09 17:17:40 +01003472 ret = PTR_ERR_OR_ZERO(em);
Chris Masond1310b22008-01-24 16:13:08 -05003473 break;
3474 }
3475
3476 extent_offset = cur - em->start;
Chris Mason40f76582014-05-21 13:35:51 -07003477 em_end = extent_map_end(em);
3478 BUG_ON(em_end <= cur);
Chris Masond1310b22008-01-24 16:13:08 -05003479 BUG_ON(end < cur);
Chris Mason40f76582014-05-21 13:35:51 -07003480 iosize = min(em_end - cur, end - cur + 1);
Qu Wenruofda28322013-02-26 08:10:22 +00003481 iosize = ALIGN(iosize, blocksize);
David Sterba6273b7f2017-10-04 17:30:11 +02003482 offset = em->block_start + extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003483 bdev = em->bdev;
3484 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003485 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05003486 free_extent_map(em);
3487 em = NULL;
3488
Chris Masonc8b97812008-10-29 14:49:59 -04003489 /*
3490 * compressed and inline extents are written through other
3491 * paths in the FS
3492 */
3493 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05003494 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04003495 /*
3496 * end_io notification does not happen here for
3497 * compressed extents
3498 */
Nikolay Borisov7087a9d2018-11-01 14:09:48 +02003499 if (!compressed)
3500 btrfs_writepage_endio_finish_ordered(page, cur,
3501 cur + iosize - 1,
Nikolay Borisovc6297322018-11-08 10:18:08 +02003502 1);
Chris Masonc8b97812008-10-29 14:49:59 -04003503 else if (compressed) {
3504 /* we don't want to end_page_writeback on
3505 * a compressed extent. this happens
3506 * elsewhere
3507 */
3508 nr++;
3509 }
3510
3511 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003512 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003513 continue;
3514 }
Chris Masonc8b97812008-10-29 14:49:59 -04003515
David Sterba5cdc84b2018-07-18 20:32:52 +02003516 btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
David Sterba58409ed2016-05-04 11:46:10 +02003517 if (!PageWriteback(page)) {
3518 btrfs_err(BTRFS_I(inode)->root->fs_info,
3519 "page %lu not writeback, cur %llu end %llu",
3520 page->index, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003521 }
David Sterba58409ed2016-05-04 11:46:10 +02003522
David Sterba4b81ba42017-06-06 19:14:26 +02003523 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02003524 page, offset, iosize, pg_offset,
David Sterbac2df8bb2017-02-10 19:29:38 +01003525 bdev, &epd->bio,
David Sterba58409ed2016-05-04 11:46:10 +02003526 end_bio_extent_writepage,
3527 0, 0, 0, false);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003528 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003529 SetPageError(page);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003530 if (PageWriteback(page))
3531 end_page_writeback(page);
3532 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003533
Chris Masond1310b22008-01-24 16:13:08 -05003534 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003535 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003536 nr++;
3537 }
3538done:
Chris Mason40f76582014-05-21 13:35:51 -07003539 *nr_ret = nr;
Chris Mason40f76582014-05-21 13:35:51 -07003540 return ret;
3541}
3542
3543/*
3544 * the writepage semantics are similar to regular writepage. extent
3545 * records are inserted to lock ranges in the tree, and as dirty areas
3546 * are found, they are marked writeback. Then the lock bits are removed
3547 * and the end_io handler clears the writeback ranges
Qu Wenruo30659762019-03-20 14:27:42 +08003548 *
3549 * Return 0 if everything goes well.
3550 * Return <0 for error.
Chris Mason40f76582014-05-21 13:35:51 -07003551 */
3552static int __extent_writepage(struct page *page, struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01003553 struct extent_page_data *epd)
Chris Mason40f76582014-05-21 13:35:51 -07003554{
3555 struct inode *inode = page->mapping->host;
Chris Mason40f76582014-05-21 13:35:51 -07003556 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003557 u64 page_end = start + PAGE_SIZE - 1;
Chris Mason40f76582014-05-21 13:35:51 -07003558 int ret;
3559 int nr = 0;
3560 size_t pg_offset = 0;
3561 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003562 unsigned long end_index = i_size >> PAGE_SHIFT;
David Sterbaf1c77c52017-06-06 19:03:49 +02003563 unsigned int write_flags = 0;
Chris Mason40f76582014-05-21 13:35:51 -07003564 unsigned long nr_written = 0;
3565
Liu Boff40adf2017-08-24 18:19:48 -06003566 write_flags = wbc_to_write_flags(wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003567
3568 trace___extent_writepage(page, inode, wbc);
3569
3570 WARN_ON(!PageLocked(page));
3571
3572 ClearPageError(page);
3573
Johannes Thumshirn70730172018-12-05 15:23:03 +01003574 pg_offset = offset_in_page(i_size);
Chris Mason40f76582014-05-21 13:35:51 -07003575 if (page->index > end_index ||
3576 (page->index == end_index && !pg_offset)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003577 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
Chris Mason40f76582014-05-21 13:35:51 -07003578 unlock_page(page);
3579 return 0;
3580 }
3581
3582 if (page->index == end_index) {
3583 char *userpage;
3584
3585 userpage = kmap_atomic(page);
3586 memset(userpage + pg_offset, 0,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003587 PAGE_SIZE - pg_offset);
Chris Mason40f76582014-05-21 13:35:51 -07003588 kunmap_atomic(userpage);
3589 flush_dcache_page(page);
3590 }
3591
3592 pg_offset = 0;
3593
3594 set_page_extent_mapped(page);
3595
Nikolay Borisov7789a552018-11-08 10:18:06 +02003596 if (!epd->extent_locked) {
Nikolay Borisov8cc02372018-11-08 10:18:07 +02003597 ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
Nikolay Borisov7789a552018-11-08 10:18:06 +02003598 if (ret == 1)
3599 goto done_unlocked;
3600 if (ret)
3601 goto done;
3602 }
Chris Mason40f76582014-05-21 13:35:51 -07003603
3604 ret = __extent_writepage_io(inode, page, wbc, epd,
3605 i_size, nr_written, write_flags, &nr);
3606 if (ret == 1)
3607 goto done_unlocked;
3608
3609done:
Chris Masond1310b22008-01-24 16:13:08 -05003610 if (nr == 0) {
3611 /* make sure the mapping tag for page dirty gets cleared */
3612 set_page_writeback(page);
3613 end_page_writeback(page);
3614 }
Filipe Manana61391d52014-05-09 17:17:40 +01003615 if (PageError(page)) {
3616 ret = ret < 0 ? ret : -EIO;
3617 end_extent_writepage(page, ret, start, page_end);
3618 }
Chris Masond1310b22008-01-24 16:13:08 -05003619 unlock_page(page);
Qu Wenruo30659762019-03-20 14:27:42 +08003620 ASSERT(ret <= 0);
Chris Mason40f76582014-05-21 13:35:51 -07003621 return ret;
Chris Mason771ed682008-11-06 22:02:51 -05003622
Chris Mason11c83492009-04-20 15:50:09 -04003623done_unlocked:
Chris Masond1310b22008-01-24 16:13:08 -05003624 return 0;
3625}
3626
Josef Bacikfd8b2b62013-04-24 16:41:19 -04003627void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003628{
NeilBrown74316202014-07-07 15:16:04 +10003629 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3630 TASK_UNINTERRUPTIBLE);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003631}
3632
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003633/*
3634 * Lock eb pages and flush the bio if we can't the locks
3635 *
3636 * Return 0 if nothing went wrong
3637 * Return >0 is same as 0, except bio is not submitted
3638 * Return <0 if something went wrong, no page is locked
3639 */
David Sterba9df76fb2019-03-20 11:21:41 +01003640static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
Chris Mason0e378df2014-05-19 20:55:27 -07003641 struct extent_page_data *epd)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003642{
David Sterba9df76fb2019-03-20 11:21:41 +01003643 struct btrfs_fs_info *fs_info = eb->fs_info;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003644 int i, num_pages, failed_page_nr;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003645 int flush = 0;
3646 int ret = 0;
3647
3648 if (!btrfs_try_tree_write_lock(eb)) {
Qu Wenruof4340622019-03-20 14:27:41 +08003649 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003650 if (ret < 0)
3651 return ret;
3652 flush = 1;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003653 btrfs_tree_lock(eb);
3654 }
3655
3656 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3657 btrfs_tree_unlock(eb);
3658 if (!epd->sync_io)
3659 return 0;
3660 if (!flush) {
Qu Wenruof4340622019-03-20 14:27:41 +08003661 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003662 if (ret < 0)
3663 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003664 flush = 1;
3665 }
Chris Masona098d8e82012-03-21 12:09:56 -04003666 while (1) {
3667 wait_on_extent_buffer_writeback(eb);
3668 btrfs_tree_lock(eb);
3669 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3670 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003671 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003672 }
3673 }
3674
Josef Bacik51561ff2012-07-20 16:25:24 -04003675 /*
3676 * We need to do this to prevent races in people who check if the eb is
3677 * under IO since we can end up having no IO bits set for a short period
3678 * of time.
3679 */
3680 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003681 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3682 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04003683 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003684 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
Nikolay Borisov104b4e52017-06-20 21:01:20 +03003685 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3686 -eb->len,
3687 fs_info->dirty_metadata_batch);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003688 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04003689 } else {
3690 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003691 }
3692
3693 btrfs_tree_unlock(eb);
3694
3695 if (!ret)
3696 return ret;
3697
David Sterba65ad0102018-06-29 10:56:49 +02003698 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003699 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003700 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003701
3702 if (!trylock_page(p)) {
3703 if (!flush) {
Qu Wenruof4340622019-03-20 14:27:41 +08003704 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003705 if (ret < 0) {
3706 failed_page_nr = i;
3707 goto err_unlock;
3708 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003709 flush = 1;
3710 }
3711 lock_page(p);
3712 }
3713 }
3714
3715 return ret;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003716err_unlock:
3717 /* Unlock already locked pages */
3718 for (i = 0; i < failed_page_nr; i++)
3719 unlock_page(eb->pages[i]);
3720 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003721}
3722
3723static void end_extent_buffer_writeback(struct extent_buffer *eb)
3724{
3725 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003726 smp_mb__after_atomic();
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003727 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3728}
3729
Filipe Manana656f30d2014-09-26 12:25:56 +01003730static void set_btree_ioerr(struct page *page)
3731{
3732 struct extent_buffer *eb = (struct extent_buffer *)page->private;
Filipe Manana656f30d2014-09-26 12:25:56 +01003733
3734 SetPageError(page);
3735 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3736 return;
3737
3738 /*
3739 * If writeback for a btree extent that doesn't belong to a log tree
3740 * failed, increment the counter transaction->eb_write_errors.
3741 * We do this because while the transaction is running and before it's
3742 * committing (when we call filemap_fdata[write|wait]_range against
3743 * the btree inode), we might have
3744 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3745 * returns an error or an error happens during writeback, when we're
3746 * committing the transaction we wouldn't know about it, since the pages
3747 * can be no longer dirty nor marked anymore for writeback (if a
3748 * subsequent modification to the extent buffer didn't happen before the
3749 * transaction commit), which makes filemap_fdata[write|wait]_range not
3750 * able to find the pages tagged with SetPageError at transaction
3751 * commit time. So if this happens we must abort the transaction,
3752 * otherwise we commit a super block with btree roots that point to
3753 * btree nodes/leafs whose content on disk is invalid - either garbage
3754 * or the content of some node/leaf from a past generation that got
3755 * cowed or deleted and is no longer valid.
3756 *
3757 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3758 * not be enough - we need to distinguish between log tree extents vs
3759 * non-log tree extents, and the next filemap_fdatawait_range() call
3760 * will catch and clear such errors in the mapping - and that call might
3761 * be from a log sync and not from a transaction commit. Also, checking
3762 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3763 * not done and would not be reliable - the eb might have been released
3764 * from memory and reading it back again means that flag would not be
3765 * set (since it's a runtime flag, not persisted on disk).
3766 *
3767 * Using the flags below in the btree inode also makes us achieve the
3768 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
3769 * writeback for all dirty pages and before filemap_fdatawait_range()
3770 * is called, the writeback for all dirty pages had already finished
3771 * with errors - because we were not using AS_EIO/AS_ENOSPC,
3772 * filemap_fdatawait_range() would return success, as it could not know
3773 * that writeback errors happened (the pages were no longer tagged for
3774 * writeback).
3775 */
3776 switch (eb->log_index) {
3777 case -1:
Josef Bacikafcdd122016-09-02 15:40:02 -04003778 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003779 break;
3780 case 0:
Josef Bacikafcdd122016-09-02 15:40:02 -04003781 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003782 break;
3783 case 1:
Josef Bacikafcdd122016-09-02 15:40:02 -04003784 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003785 break;
3786 default:
3787 BUG(); /* unexpected, logic error */
3788 }
3789}
3790
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003791static void end_bio_extent_buffer_writepage(struct bio *bio)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003792{
Kent Overstreet2c30c712013-11-07 12:20:26 -08003793 struct bio_vec *bvec;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003794 struct extent_buffer *eb;
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02003795 int done;
Ming Lei6dc4f102019-02-15 19:13:19 +08003796 struct bvec_iter_all iter_all;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003797
David Sterbac09abff2017-07-13 18:10:07 +02003798 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02003799 bio_for_each_segment_all(bvec, bio, iter_all) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003800 struct page *page = bvec->bv_page;
3801
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003802 eb = (struct extent_buffer *)page->private;
3803 BUG_ON(!eb);
3804 done = atomic_dec_and_test(&eb->io_pages);
3805
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02003806 if (bio->bi_status ||
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003807 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003808 ClearPageUptodate(page);
Filipe Manana656f30d2014-09-26 12:25:56 +01003809 set_btree_ioerr(page);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003810 }
3811
3812 end_page_writeback(page);
3813
3814 if (!done)
3815 continue;
3816
3817 end_extent_buffer_writeback(eb);
Kent Overstreet2c30c712013-11-07 12:20:26 -08003818 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003819
3820 bio_put(bio);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003821}
3822
Chris Mason0e378df2014-05-19 20:55:27 -07003823static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003824 struct writeback_control *wbc,
3825 struct extent_page_data *epd)
3826{
David Sterba0ab02062019-03-20 11:27:57 +01003827 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003828 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
Josef Bacikf28491e2013-12-16 13:24:27 -05003829 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003830 u64 offset = eb->start;
Liu Bo851cd172016-09-23 13:44:44 -07003831 u32 nritems;
David Sterbacc5e31a2018-03-01 18:20:27 +01003832 int i, num_pages;
Liu Bo851cd172016-09-23 13:44:44 -07003833 unsigned long start, end;
Liu Boff40adf2017-08-24 18:19:48 -06003834 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04003835 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003836
Filipe Manana656f30d2014-09-26 12:25:56 +01003837 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02003838 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003839 atomic_set(&eb->io_pages, num_pages);
Josef Bacikde0022b2012-09-25 14:25:58 -04003840
Liu Bo851cd172016-09-23 13:44:44 -07003841 /* set btree blocks beyond nritems with 0 to avoid stale content. */
3842 nritems = btrfs_header_nritems(eb);
Liu Bo3eb548e2016-09-14 17:22:57 -07003843 if (btrfs_header_level(eb) > 0) {
Liu Bo3eb548e2016-09-14 17:22:57 -07003844 end = btrfs_node_key_ptr_offset(nritems);
3845
David Sterbab159fa22016-11-08 18:09:03 +01003846 memzero_extent_buffer(eb, end, eb->len - end);
Liu Bo851cd172016-09-23 13:44:44 -07003847 } else {
3848 /*
3849 * leaf:
3850 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3851 */
3852 start = btrfs_item_nr_offset(nritems);
David Sterba8f881e82019-03-20 11:33:10 +01003853 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
David Sterbab159fa22016-11-08 18:09:03 +01003854 memzero_extent_buffer(eb, start, end - start);
Liu Bo3eb548e2016-09-14 17:22:57 -07003855 }
3856
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003857 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003858 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003859
3860 clear_page_dirty_for_io(p);
3861 set_page_writeback(p);
David Sterba4b81ba42017-06-06 19:14:26 +02003862 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02003863 p, offset, PAGE_SIZE, 0, bdev,
David Sterbac2df8bb2017-02-10 19:29:38 +01003864 &epd->bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05003865 end_bio_extent_buffer_writepage,
Liu Bo18fdc672017-09-13 12:18:22 -06003866 0, 0, 0, false);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003867 if (ret) {
Filipe Manana656f30d2014-09-26 12:25:56 +01003868 set_btree_ioerr(p);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003869 if (PageWriteback(p))
3870 end_page_writeback(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003871 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3872 end_extent_buffer_writeback(eb);
3873 ret = -EIO;
3874 break;
3875 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003876 offset += PAGE_SIZE;
David Sterba3d4b9492017-02-10 19:33:41 +01003877 update_nr_written(wbc, 1);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003878 unlock_page(p);
3879 }
3880
3881 if (unlikely(ret)) {
3882 for (; i < num_pages; i++) {
Chris Masonbbf65cf2014-10-04 09:56:45 -07003883 struct page *p = eb->pages[i];
Liu Bo81465022014-09-23 22:22:33 +08003884 clear_page_dirty_for_io(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003885 unlock_page(p);
3886 }
3887 }
3888
3889 return ret;
3890}
3891
3892int btree_write_cache_pages(struct address_space *mapping,
3893 struct writeback_control *wbc)
3894{
3895 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003896 struct extent_buffer *eb, *prev_eb = NULL;
3897 struct extent_page_data epd = {
3898 .bio = NULL,
3899 .tree = tree,
3900 .extent_locked = 0,
3901 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3902 };
3903 int ret = 0;
3904 int done = 0;
3905 int nr_to_write_done = 0;
3906 struct pagevec pvec;
3907 int nr_pages;
3908 pgoff_t index;
3909 pgoff_t end; /* Inclusive */
3910 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05003911 xa_mark_t tag;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003912
Mel Gorman86679822017-11-15 17:37:52 -08003913 pagevec_init(&pvec);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003914 if (wbc->range_cyclic) {
3915 index = mapping->writeback_index; /* Start from prev offset */
3916 end = -1;
3917 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003918 index = wbc->range_start >> PAGE_SHIFT;
3919 end = wbc->range_end >> PAGE_SHIFT;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003920 scanned = 1;
3921 }
3922 if (wbc->sync_mode == WB_SYNC_ALL)
3923 tag = PAGECACHE_TAG_TOWRITE;
3924 else
3925 tag = PAGECACHE_TAG_DIRTY;
3926retry:
3927 if (wbc->sync_mode == WB_SYNC_ALL)
3928 tag_pages_for_writeback(mapping, index, end);
3929 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara4006f432017-11-15 17:34:37 -08003930 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
Jan Kara67fd7072017-11-15 17:35:19 -08003931 tag))) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003932 unsigned i;
3933
3934 scanned = 1;
3935 for (i = 0; i < nr_pages; i++) {
3936 struct page *page = pvec.pages[i];
3937
3938 if (!PagePrivate(page))
3939 continue;
3940
Josef Bacikb5bae262012-09-14 13:43:01 -04003941 spin_lock(&mapping->private_lock);
3942 if (!PagePrivate(page)) {
3943 spin_unlock(&mapping->private_lock);
3944 continue;
3945 }
3946
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003947 eb = (struct extent_buffer *)page->private;
Josef Bacikb5bae262012-09-14 13:43:01 -04003948
3949 /*
3950 * Shouldn't happen and normally this would be a BUG_ON
3951 * but no sense in crashing the users box for something
3952 * we can survive anyway.
3953 */
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05303954 if (WARN_ON(!eb)) {
Josef Bacikb5bae262012-09-14 13:43:01 -04003955 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003956 continue;
3957 }
3958
Josef Bacikb5bae262012-09-14 13:43:01 -04003959 if (eb == prev_eb) {
3960 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003961 continue;
3962 }
3963
Josef Bacikb5bae262012-09-14 13:43:01 -04003964 ret = atomic_inc_not_zero(&eb->refs);
3965 spin_unlock(&mapping->private_lock);
3966 if (!ret)
3967 continue;
3968
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003969 prev_eb = eb;
David Sterba9df76fb2019-03-20 11:21:41 +01003970 ret = lock_extent_buffer_for_io(eb, &epd);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003971 if (!ret) {
3972 free_extent_buffer(eb);
3973 continue;
3974 }
3975
David Sterba0ab02062019-03-20 11:27:57 +01003976 ret = write_one_eb(eb, wbc, &epd);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003977 if (ret) {
3978 done = 1;
3979 free_extent_buffer(eb);
3980 break;
3981 }
3982 free_extent_buffer(eb);
3983
3984 /*
3985 * the filesystem may choose to bump up nr_to_write.
3986 * We have to make sure to honor the new nr_to_write
3987 * at any time
3988 */
3989 nr_to_write_done = wbc->nr_to_write <= 0;
3990 }
3991 pagevec_release(&pvec);
3992 cond_resched();
3993 }
3994 if (!scanned && !done) {
3995 /*
3996 * We hit the last page and there is more work to be done: wrap
3997 * back to the start of the file
3998 */
3999 scanned = 1;
4000 index = 0;
4001 goto retry;
4002 }
Qu Wenruo2b952ee2019-03-20 14:27:43 +08004003 ASSERT(ret <= 0);
4004 if (ret < 0) {
4005 end_write_bio(&epd, ret);
4006 return ret;
4007 }
4008 ret = flush_write_bio(&epd);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004009 return ret;
4010}
4011
Chris Masond1310b22008-01-24 16:13:08 -05004012/**
Chris Mason4bef0842008-09-08 11:18:08 -04004013 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05004014 * @mapping: address space structure to write
4015 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
David Sterba935db852017-06-23 04:30:28 +02004016 * @data: data passed to __extent_writepage function
Chris Masond1310b22008-01-24 16:13:08 -05004017 *
4018 * If a page is already under I/O, write_cache_pages() skips it, even
4019 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4020 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4021 * and msync() need to guarantee that all the data which was dirty at the time
4022 * the call was made get new I/O started against them. If wbc->sync_mode is
4023 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4024 * existing IO to complete.
4025 */
David Sterba4242b642017-02-10 19:38:24 +01004026static int extent_write_cache_pages(struct address_space *mapping,
Chris Mason4bef0842008-09-08 11:18:08 -04004027 struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01004028 struct extent_page_data *epd)
Chris Masond1310b22008-01-24 16:13:08 -05004029{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004030 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05004031 int ret = 0;
4032 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004033 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004034 struct pagevec pvec;
4035 int nr_pages;
4036 pgoff_t index;
4037 pgoff_t end; /* Inclusive */
Liu Boa91326672016-03-07 16:56:21 -08004038 pgoff_t done_index;
4039 int range_whole = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004040 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05004041 xa_mark_t tag;
Chris Masond1310b22008-01-24 16:13:08 -05004042
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004043 /*
4044 * We have to hold onto the inode so that ordered extents can do their
4045 * work when the IO finishes. The alternative to this is failing to add
4046 * an ordered extent if the igrab() fails there and that is a huge pain
4047 * to deal with, so instead just hold onto the inode throughout the
4048 * writepages operation. If it fails here we are freeing up the inode
4049 * anyway and we'd rather not waste our time writing out stuff that is
4050 * going to be truncated anyway.
4051 */
4052 if (!igrab(inode))
4053 return 0;
4054
Mel Gorman86679822017-11-15 17:37:52 -08004055 pagevec_init(&pvec);
Chris Masond1310b22008-01-24 16:13:08 -05004056 if (wbc->range_cyclic) {
4057 index = mapping->writeback_index; /* Start from prev offset */
4058 end = -1;
4059 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004060 index = wbc->range_start >> PAGE_SHIFT;
4061 end = wbc->range_end >> PAGE_SHIFT;
Liu Boa91326672016-03-07 16:56:21 -08004062 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4063 range_whole = 1;
Chris Masond1310b22008-01-24 16:13:08 -05004064 scanned = 1;
4065 }
Ethan Lien3cd24c62018-11-01 14:49:03 +08004066
4067 /*
4068 * We do the tagged writepage as long as the snapshot flush bit is set
4069 * and we are the first one who do the filemap_flush() on this inode.
4070 *
4071 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4072 * not race in and drop the bit.
4073 */
4074 if (range_whole && wbc->nr_to_write == LONG_MAX &&
4075 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4076 &BTRFS_I(inode)->runtime_flags))
4077 wbc->tagged_writepages = 1;
4078
4079 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004080 tag = PAGECACHE_TAG_TOWRITE;
4081 else
4082 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05004083retry:
Ethan Lien3cd24c62018-11-01 14:49:03 +08004084 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004085 tag_pages_for_writeback(mapping, index, end);
Liu Boa91326672016-03-07 16:56:21 -08004086 done_index = index;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004087 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara67fd7072017-11-15 17:35:19 -08004088 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4089 &index, end, tag))) {
Chris Masond1310b22008-01-24 16:13:08 -05004090 unsigned i;
4091
4092 scanned = 1;
4093 for (i = 0; i < nr_pages; i++) {
4094 struct page *page = pvec.pages[i];
4095
Liu Boa91326672016-03-07 16:56:21 -08004096 done_index = page->index;
Chris Masond1310b22008-01-24 16:13:08 -05004097 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07004098 * At this point we hold neither the i_pages lock nor
4099 * the page lock: the page may be truncated or
4100 * invalidated (changing page->mapping to NULL),
4101 * or even swizzled back from swapper_space to
4102 * tmpfs file mapping
Chris Masond1310b22008-01-24 16:13:08 -05004103 */
Josef Bacikc8f2f242013-02-11 11:33:00 -05004104 if (!trylock_page(page)) {
Qu Wenruof4340622019-03-20 14:27:41 +08004105 ret = flush_write_bio(epd);
4106 BUG_ON(ret < 0);
Josef Bacikc8f2f242013-02-11 11:33:00 -05004107 lock_page(page);
Chris Mason01d658f2011-11-01 10:08:06 -04004108 }
Chris Masond1310b22008-01-24 16:13:08 -05004109
4110 if (unlikely(page->mapping != mapping)) {
4111 unlock_page(page);
4112 continue;
4113 }
4114
Chris Masond2c3f4f2008-11-19 12:44:22 -05004115 if (wbc->sync_mode != WB_SYNC_NONE) {
Qu Wenruof4340622019-03-20 14:27:41 +08004116 if (PageWriteback(page)) {
4117 ret = flush_write_bio(epd);
4118 BUG_ON(ret < 0);
4119 }
Chris Masond1310b22008-01-24 16:13:08 -05004120 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05004121 }
Chris Masond1310b22008-01-24 16:13:08 -05004122
4123 if (PageWriteback(page) ||
4124 !clear_page_dirty_for_io(page)) {
4125 unlock_page(page);
4126 continue;
4127 }
4128
David Sterbaaab6e9e2017-11-30 18:00:02 +01004129 ret = __extent_writepage(page, wbc, epd);
Liu Boa91326672016-03-07 16:56:21 -08004130 if (ret < 0) {
4131 /*
4132 * done_index is set past this page,
4133 * so media errors will not choke
4134 * background writeout for the entire
4135 * file. This has consequences for
4136 * range_cyclic semantics (ie. it may
4137 * not be suitable for data integrity
4138 * writeout).
4139 */
4140 done_index = page->index + 1;
4141 done = 1;
4142 break;
4143 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004144
4145 /*
4146 * the filesystem may choose to bump up nr_to_write.
4147 * We have to make sure to honor the new nr_to_write
4148 * at any time
4149 */
4150 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05004151 }
4152 pagevec_release(&pvec);
4153 cond_resched();
4154 }
Liu Bo894b36e2016-03-07 16:56:22 -08004155 if (!scanned && !done) {
Chris Masond1310b22008-01-24 16:13:08 -05004156 /*
4157 * We hit the last page and there is more work to be done: wrap
4158 * back to the start of the file
4159 */
4160 scanned = 1;
4161 index = 0;
4162 goto retry;
4163 }
Liu Boa91326672016-03-07 16:56:21 -08004164
4165 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4166 mapping->writeback_index = done_index;
4167
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004168 btrfs_add_delayed_iput(inode);
Liu Bo894b36e2016-03-07 16:56:22 -08004169 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004170}
Chris Masond1310b22008-01-24 16:13:08 -05004171
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +02004172int extent_write_full_page(struct page *page, struct writeback_control *wbc)
Chris Masond1310b22008-01-24 16:13:08 -05004173{
4174 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004175 struct extent_page_data epd = {
4176 .bio = NULL,
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +02004177 .tree = &BTRFS_I(page->mapping->host)->io_tree,
Chris Mason771ed682008-11-06 22:02:51 -05004178 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004179 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004180 };
Chris Masond1310b22008-01-24 16:13:08 -05004181
Chris Masond1310b22008-01-24 16:13:08 -05004182 ret = __extent_writepage(page, wbc, &epd);
Qu Wenruo30659762019-03-20 14:27:42 +08004183 ASSERT(ret <= 0);
4184 if (ret < 0) {
4185 end_write_bio(&epd, ret);
4186 return ret;
4187 }
Chris Masond1310b22008-01-24 16:13:08 -05004188
Qu Wenruo30659762019-03-20 14:27:42 +08004189 ret = flush_write_bio(&epd);
4190 ASSERT(ret <= 0);
Chris Masond1310b22008-01-24 16:13:08 -05004191 return ret;
4192}
Chris Masond1310b22008-01-24 16:13:08 -05004193
Nikolay Borisov5e3ee232017-12-08 15:55:58 +02004194int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
Chris Mason771ed682008-11-06 22:02:51 -05004195 int mode)
4196{
4197 int ret = 0;
4198 struct address_space *mapping = inode->i_mapping;
Nikolay Borisov5e3ee232017-12-08 15:55:58 +02004199 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Chris Mason771ed682008-11-06 22:02:51 -05004200 struct page *page;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004201 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4202 PAGE_SHIFT;
Chris Mason771ed682008-11-06 22:02:51 -05004203
4204 struct extent_page_data epd = {
4205 .bio = NULL,
4206 .tree = tree,
Chris Mason771ed682008-11-06 22:02:51 -05004207 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04004208 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05004209 };
4210 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05004211 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05004212 .nr_to_write = nr_pages * 2,
4213 .range_start = start,
4214 .range_end = end + 1,
4215 };
4216
Chris Masond3977122009-01-05 21:25:51 -05004217 while (start <= end) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004218 page = find_get_page(mapping, start >> PAGE_SHIFT);
Chris Mason771ed682008-11-06 22:02:51 -05004219 if (clear_page_dirty_for_io(page))
4220 ret = __extent_writepage(page, &wbc_writepages, &epd);
4221 else {
Nikolay Borisov7087a9d2018-11-01 14:09:48 +02004222 btrfs_writepage_endio_finish_ordered(page, start,
Nikolay Borisovc6297322018-11-08 10:18:08 +02004223 start + PAGE_SIZE - 1, 1);
Chris Mason771ed682008-11-06 22:02:51 -05004224 unlock_page(page);
4225 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004226 put_page(page);
4227 start += PAGE_SIZE;
Chris Mason771ed682008-11-06 22:02:51 -05004228 }
4229
Qu Wenruo02c6db42019-03-20 14:27:45 +08004230 ASSERT(ret <= 0);
4231 if (ret < 0) {
4232 end_write_bio(&epd, ret);
4233 return ret;
4234 }
4235 ret = flush_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05004236 return ret;
4237}
Chris Masond1310b22008-01-24 16:13:08 -05004238
Nikolay Borisov8ae225a2018-04-19 10:46:38 +03004239int extent_writepages(struct address_space *mapping,
Chris Masond1310b22008-01-24 16:13:08 -05004240 struct writeback_control *wbc)
4241{
4242 int ret = 0;
4243 struct extent_page_data epd = {
4244 .bio = NULL,
Nikolay Borisov8ae225a2018-04-19 10:46:38 +03004245 .tree = &BTRFS_I(mapping->host)->io_tree,
Chris Mason771ed682008-11-06 22:02:51 -05004246 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004247 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004248 };
4249
David Sterba935db852017-06-23 04:30:28 +02004250 ret = extent_write_cache_pages(mapping, wbc, &epd);
Qu Wenruoa2a72fb2019-03-20 14:27:48 +08004251 ASSERT(ret <= 0);
4252 if (ret < 0) {
4253 end_write_bio(&epd, ret);
4254 return ret;
4255 }
4256 ret = flush_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004257 return ret;
4258}
Chris Masond1310b22008-01-24 16:13:08 -05004259
Nikolay Borisov2a3ff0a2018-04-19 10:46:36 +03004260int extent_readpages(struct address_space *mapping, struct list_head *pages,
4261 unsigned nr_pages)
Chris Masond1310b22008-01-24 16:13:08 -05004262{
4263 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04004264 unsigned long bio_flags = 0;
Liu Bo67c96842012-07-20 21:43:09 -06004265 struct page *pagepool[16];
Miao Xie125bac012013-07-25 19:22:37 +08004266 struct extent_map *em_cached = NULL;
Nikolay Borisov2a3ff0a2018-04-19 10:46:36 +03004267 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
Liu Bo67c96842012-07-20 21:43:09 -06004268 int nr = 0;
Filipe Manana808f80b2015-09-28 09:56:26 +01004269 u64 prev_em_start = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05004270
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004271 while (!list_empty(pages)) {
Nikolay Borisove65ef212019-03-11 09:55:38 +02004272 u64 contig_end = 0;
4273
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004274 for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004275 struct page *page = lru_to_page(pages);
Chris Masond1310b22008-01-24 16:13:08 -05004276
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004277 prefetchw(&page->flags);
4278 list_del(&page->lru);
4279 if (add_to_page_cache_lru(page, mapping, page->index,
4280 readahead_gfp_mask(mapping))) {
4281 put_page(page);
Nikolay Borisove65ef212019-03-11 09:55:38 +02004282 break;
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004283 }
4284
4285 pagepool[nr++] = page;
Nikolay Borisove65ef212019-03-11 09:55:38 +02004286 contig_end = page_offset(page) + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05004287 }
Liu Bo67c96842012-07-20 21:43:09 -06004288
Nikolay Borisove65ef212019-03-11 09:55:38 +02004289 if (nr) {
4290 u64 contig_start = page_offset(pagepool[0]);
4291
4292 ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
4293
4294 contiguous_readpages(tree, pagepool, nr, contig_start,
4295 contig_end, &em_cached, &bio, &bio_flags,
4296 &prev_em_start);
4297 }
Chris Masond1310b22008-01-24 16:13:08 -05004298 }
Liu Bo67c96842012-07-20 21:43:09 -06004299
Miao Xie125bac012013-07-25 19:22:37 +08004300 if (em_cached)
4301 free_extent_map(em_cached);
4302
Chris Masond1310b22008-01-24 16:13:08 -05004303 if (bio)
Mike Christie1f7ad752016-06-05 14:31:51 -05004304 return submit_one_bio(bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05004305 return 0;
4306}
Chris Masond1310b22008-01-24 16:13:08 -05004307
4308/*
4309 * basic invalidatepage code, this waits on any locked or writeback
4310 * ranges corresponding to the page, and then deletes any extent state
4311 * records from the tree
4312 */
4313int extent_invalidatepage(struct extent_io_tree *tree,
4314 struct page *page, unsigned long offset)
4315{
Josef Bacik2ac55d42010-02-03 19:33:23 +00004316 struct extent_state *cached_state = NULL;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004317 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004318 u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05004319 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4320
Qu Wenruofda28322013-02-26 08:10:22 +00004321 start += ALIGN(offset, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05004322 if (start > end)
4323 return 0;
4324
David Sterbaff13db42015-12-03 14:30:40 +01004325 lock_extent_bits(tree, start, end, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04004326 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05004327 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04004328 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4329 EXTENT_DO_ACCOUNTING,
David Sterbaae0f1622017-10-31 16:37:52 +01004330 1, 1, &cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05004331 return 0;
4332}
Chris Masond1310b22008-01-24 16:13:08 -05004333
4334/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04004335 * a helper for releasepage, this tests for areas of the page that
4336 * are locked or under IO and drops the related state bits if it is safe
4337 * to drop the page.
4338 */
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03004339static int try_release_extent_state(struct extent_io_tree *tree,
Eric Sandeen48a3b632013-04-25 20:41:01 +00004340 struct page *page, gfp_t mask)
Chris Mason7b13b7b2008-04-18 10:29:50 -04004341{
Miao Xie4eee4fa2012-12-21 09:17:45 +00004342 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004343 u64 end = start + PAGE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004344 int ret = 1;
4345
Nikolay Borisov88826792019-03-14 15:28:31 +02004346 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
Chris Mason7b13b7b2008-04-18 10:29:50 -04004347 ret = 0;
Nikolay Borisov88826792019-03-14 15:28:31 +02004348 } else {
Chris Mason11ef1602009-09-23 20:28:46 -04004349 /*
4350 * at this point we can safely clear everything except the
4351 * locked bit and the nodatasum bit
4352 */
David Sterba66b0c882017-10-31 16:30:47 +01004353 ret = __clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04004354 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
David Sterba66b0c882017-10-31 16:30:47 +01004355 0, 0, NULL, mask, NULL);
Chris Masone3f24cc2011-02-14 12:52:08 -05004356
4357 /* if clear_extent_bit failed for enomem reasons,
4358 * we can't allow the release to continue.
4359 */
4360 if (ret < 0)
4361 ret = 0;
4362 else
4363 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004364 }
4365 return ret;
4366}
Chris Mason7b13b7b2008-04-18 10:29:50 -04004367
4368/*
Chris Masond1310b22008-01-24 16:13:08 -05004369 * a helper for releasepage. As long as there are no locked extents
4370 * in the range corresponding to the page, both state records and extent
4371 * map records are removed
4372 */
Nikolay Borisov477a30b2018-04-19 10:46:34 +03004373int try_release_extent_mapping(struct page *page, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05004374{
4375 struct extent_map *em;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004376 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004377 u64 end = start + PAGE_SIZE - 1;
Filipe Mananabd3599a2018-07-12 01:36:43 +01004378 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4379 struct extent_io_tree *tree = &btrfs_inode->io_tree;
4380 struct extent_map_tree *map = &btrfs_inode->extent_tree;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004381
Mel Gormand0164ad2015-11-06 16:28:21 -08004382 if (gfpflags_allow_blocking(mask) &&
Byongho Leeee221842015-12-15 01:42:10 +09004383 page->mapping->host->i_size > SZ_16M) {
Yan39b56372008-02-15 10:40:50 -05004384 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05004385 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05004386 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04004387 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05004388 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09004389 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04004390 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004391 break;
4392 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04004393 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4394 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04004395 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004396 free_extent_map(em);
4397 break;
4398 }
4399 if (!test_range_bit(tree, em->start,
4400 extent_map_end(em) - 1,
Nikolay Borisov4e586ca2019-03-14 15:28:30 +02004401 EXTENT_LOCKED, 0, NULL)) {
Filipe Mananabd3599a2018-07-12 01:36:43 +01004402 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4403 &btrfs_inode->runtime_flags);
Chris Mason70dec802008-01-29 09:59:12 -05004404 remove_extent_mapping(map, em);
4405 /* once for the rb tree */
4406 free_extent_map(em);
4407 }
4408 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04004409 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004410
4411 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05004412 free_extent_map(em);
4413 }
Chris Masond1310b22008-01-24 16:13:08 -05004414 }
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03004415 return try_release_extent_state(tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05004416}
Chris Masond1310b22008-01-24 16:13:08 -05004417
Chris Masonec29ed52011-02-23 16:23:20 -05004418/*
4419 * helper function for fiemap, which doesn't want to see any holes.
4420 * This maps until we find something past 'last'
4421 */
4422static struct extent_map *get_extent_skip_holes(struct inode *inode,
David Sterbae3350e12017-06-23 04:09:57 +02004423 u64 offset, u64 last)
Chris Masonec29ed52011-02-23 16:23:20 -05004424{
Jeff Mahoneyda170662016-06-15 09:22:56 -04004425 u64 sectorsize = btrfs_inode_sectorsize(inode);
Chris Masonec29ed52011-02-23 16:23:20 -05004426 struct extent_map *em;
4427 u64 len;
4428
4429 if (offset >= last)
4430 return NULL;
4431
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05304432 while (1) {
Chris Masonec29ed52011-02-23 16:23:20 -05004433 len = last - offset;
4434 if (len == 0)
4435 break;
Qu Wenruofda28322013-02-26 08:10:22 +00004436 len = ALIGN(len, sectorsize);
Nikolay Borisov4ab47a82018-12-12 09:42:32 +02004437 em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
David Sterbac7040052011-04-19 18:00:01 +02004438 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05004439 return em;
4440
4441 /* if this isn't a hole return it */
Nikolay Borisov4a2d25c2017-11-23 10:51:43 +02004442 if (em->block_start != EXTENT_MAP_HOLE)
Chris Masonec29ed52011-02-23 16:23:20 -05004443 return em;
Chris Masonec29ed52011-02-23 16:23:20 -05004444
4445 /* this is a hole, advance to the next extent */
4446 offset = extent_map_end(em);
4447 free_extent_map(em);
4448 if (offset >= last)
4449 break;
4450 }
4451 return NULL;
4452}
4453
Qu Wenruo47518322017-04-07 10:43:15 +08004454/*
4455 * To cache previous fiemap extent
4456 *
4457 * Will be used for merging fiemap extent
4458 */
4459struct fiemap_cache {
4460 u64 offset;
4461 u64 phys;
4462 u64 len;
4463 u32 flags;
4464 bool cached;
4465};
4466
4467/*
4468 * Helper to submit fiemap extent.
4469 *
4470 * Will try to merge current fiemap extent specified by @offset, @phys,
4471 * @len and @flags with cached one.
4472 * And only when we fails to merge, cached one will be submitted as
4473 * fiemap extent.
4474 *
4475 * Return value is the same as fiemap_fill_next_extent().
4476 */
4477static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4478 struct fiemap_cache *cache,
4479 u64 offset, u64 phys, u64 len, u32 flags)
4480{
4481 int ret = 0;
4482
4483 if (!cache->cached)
4484 goto assign;
4485
4486 /*
4487 * Sanity check, extent_fiemap() should have ensured that new
Andrea Gelmini52042d82018-11-28 12:05:13 +01004488 * fiemap extent won't overlap with cached one.
Qu Wenruo47518322017-04-07 10:43:15 +08004489 * Not recoverable.
4490 *
4491 * NOTE: Physical address can overlap, due to compression
4492 */
4493 if (cache->offset + cache->len > offset) {
4494 WARN_ON(1);
4495 return -EINVAL;
4496 }
4497
4498 /*
4499 * Only merges fiemap extents if
4500 * 1) Their logical addresses are continuous
4501 *
4502 * 2) Their physical addresses are continuous
4503 * So truly compressed (physical size smaller than logical size)
4504 * extents won't get merged with each other
4505 *
4506 * 3) Share same flags except FIEMAP_EXTENT_LAST
4507 * So regular extent won't get merged with prealloc extent
4508 */
4509 if (cache->offset + cache->len == offset &&
4510 cache->phys + cache->len == phys &&
4511 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4512 (flags & ~FIEMAP_EXTENT_LAST)) {
4513 cache->len += len;
4514 cache->flags |= flags;
4515 goto try_submit_last;
4516 }
4517
4518 /* Not mergeable, need to submit cached one */
4519 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4520 cache->len, cache->flags);
4521 cache->cached = false;
4522 if (ret)
4523 return ret;
4524assign:
4525 cache->cached = true;
4526 cache->offset = offset;
4527 cache->phys = phys;
4528 cache->len = len;
4529 cache->flags = flags;
4530try_submit_last:
4531 if (cache->flags & FIEMAP_EXTENT_LAST) {
4532 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4533 cache->phys, cache->len, cache->flags);
4534 cache->cached = false;
4535 }
4536 return ret;
4537}
4538
4539/*
Qu Wenruo848c23b2017-06-22 10:01:21 +08004540 * Emit last fiemap cache
Qu Wenruo47518322017-04-07 10:43:15 +08004541 *
Qu Wenruo848c23b2017-06-22 10:01:21 +08004542 * The last fiemap cache may still be cached in the following case:
4543 * 0 4k 8k
4544 * |<- Fiemap range ->|
4545 * |<------------ First extent ----------->|
4546 *
4547 * In this case, the first extent range will be cached but not emitted.
4548 * So we must emit it before ending extent_fiemap().
Qu Wenruo47518322017-04-07 10:43:15 +08004549 */
David Sterba5c5aff92019-03-20 11:29:46 +01004550static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
Qu Wenruo848c23b2017-06-22 10:01:21 +08004551 struct fiemap_cache *cache)
Qu Wenruo47518322017-04-07 10:43:15 +08004552{
4553 int ret;
4554
4555 if (!cache->cached)
4556 return 0;
4557
Qu Wenruo47518322017-04-07 10:43:15 +08004558 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4559 cache->len, cache->flags);
4560 cache->cached = false;
4561 if (ret > 0)
4562 ret = 0;
4563 return ret;
4564}
4565
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004566int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
David Sterba2135fb92017-06-23 04:09:57 +02004567 __u64 start, __u64 len)
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004568{
Josef Bacik975f84f2010-11-23 19:36:57 +00004569 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004570 u64 off = start;
4571 u64 max = start + len;
4572 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004573 u32 found_type;
4574 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05004575 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004576 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004577 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00004578 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004579 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00004580 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00004581 struct btrfs_path *path;
Josef Bacikdc046b12014-09-10 16:20:45 -04004582 struct btrfs_root *root = BTRFS_I(inode)->root;
Qu Wenruo47518322017-04-07 10:43:15 +08004583 struct fiemap_cache cache = { 0 };
David Sterba5911c8f2019-05-15 15:31:04 +02004584 struct ulist *roots;
4585 struct ulist *tmp_ulist;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004586 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004587 u64 em_start = 0;
4588 u64 em_len = 0;
4589 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004590
4591 if (len == 0)
4592 return -EINVAL;
4593
Josef Bacik975f84f2010-11-23 19:36:57 +00004594 path = btrfs_alloc_path();
4595 if (!path)
4596 return -ENOMEM;
4597 path->leave_spinning = 1;
4598
David Sterba5911c8f2019-05-15 15:31:04 +02004599 roots = ulist_alloc(GFP_KERNEL);
4600 tmp_ulist = ulist_alloc(GFP_KERNEL);
4601 if (!roots || !tmp_ulist) {
4602 ret = -ENOMEM;
4603 goto out_free_ulist;
4604 }
4605
Jeff Mahoneyda170662016-06-15 09:22:56 -04004606 start = round_down(start, btrfs_inode_sectorsize(inode));
4607 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
Josef Bacik4d479cf2011-11-17 11:34:31 -05004608
Chris Masonec29ed52011-02-23 16:23:20 -05004609 /*
4610 * lookup the last file extent. We're not using i_size here
4611 * because there might be preallocation past i_size
4612 */
David Sterbaf85b7372017-01-20 14:54:07 +01004613 ret = btrfs_lookup_file_extent(NULL, root, path,
4614 btrfs_ino(BTRFS_I(inode)), -1, 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00004615 if (ret < 0) {
4616 btrfs_free_path(path);
David Sterba5911c8f2019-05-15 15:31:04 +02004617 goto out_free_ulist;
Liu Bo2d324f52016-05-17 17:21:48 -07004618 } else {
4619 WARN_ON(!ret);
4620 if (ret == 1)
4621 ret = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004622 }
Liu Bo2d324f52016-05-17 17:21:48 -07004623
Josef Bacik975f84f2010-11-23 19:36:57 +00004624 path->slots[0]--;
Josef Bacik975f84f2010-11-23 19:36:57 +00004625 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +02004626 found_type = found_key.type;
Josef Bacik975f84f2010-11-23 19:36:57 +00004627
Chris Masonec29ed52011-02-23 16:23:20 -05004628 /* No extents, but there might be delalloc bits */
Nikolay Borisov4a0cc7c2017-01-10 20:35:31 +02004629 if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00004630 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05004631 /* have to trust i_size as the end */
4632 last = (u64)-1;
4633 last_for_get_extent = isize;
4634 } else {
4635 /*
4636 * remember the start of the last extent. There are a
4637 * bunch of different factors that go into the length of the
4638 * extent, so its much less complex to remember where it started
4639 */
4640 last = found_key.offset;
4641 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00004642 }
Liu Bofe09e162013-09-22 12:54:23 +08004643 btrfs_release_path(path);
Josef Bacik975f84f2010-11-23 19:36:57 +00004644
Chris Masonec29ed52011-02-23 16:23:20 -05004645 /*
4646 * we might have some extents allocated but more delalloc past those
4647 * extents. so, we trust isize unless the start of the last extent is
4648 * beyond isize
4649 */
4650 if (last < isize) {
4651 last = (u64)-1;
4652 last_for_get_extent = isize;
4653 }
4654
David Sterbaff13db42015-12-03 14:30:40 +01004655 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01004656 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05004657
David Sterbae3350e12017-06-23 04:09:57 +02004658 em = get_extent_skip_holes(inode, start, last_for_get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004659 if (!em)
4660 goto out;
4661 if (IS_ERR(em)) {
4662 ret = PTR_ERR(em);
4663 goto out;
4664 }
Josef Bacik975f84f2010-11-23 19:36:57 +00004665
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004666 while (!end) {
Josef Bacikb76bb702013-07-05 13:52:51 -04004667 u64 offset_in_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004668
Chris Masonea8efc72011-03-08 11:54:40 -05004669 /* break if the extent we found is outside the range */
4670 if (em->start >= max || extent_map_end(em) < off)
4671 break;
4672
4673 /*
4674 * get_extent may return an extent that starts before our
4675 * requested range. We have to make sure the ranges
4676 * we return to fiemap always move forward and don't
4677 * overlap, so adjust the offsets here
4678 */
4679 em_start = max(em->start, off);
4680
4681 /*
4682 * record the offset from the start of the extent
Josef Bacikb76bb702013-07-05 13:52:51 -04004683 * for adjusting the disk offset below. Only do this if the
4684 * extent isn't compressed since our in ram offset may be past
4685 * what we have actually allocated on disk.
Chris Masonea8efc72011-03-08 11:54:40 -05004686 */
Josef Bacikb76bb702013-07-05 13:52:51 -04004687 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4688 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05004689 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05004690 em_len = em_end - em_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004691 flags = 0;
Filipe Mananaf0986312018-06-20 10:02:30 +01004692 if (em->block_start < EXTENT_MAP_LAST_BYTE)
4693 disko = em->block_start + offset_in_extent;
4694 else
4695 disko = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004696
Chris Masonea8efc72011-03-08 11:54:40 -05004697 /*
4698 * bump off for our next call to get_extent
4699 */
4700 off = extent_map_end(em);
4701 if (off >= max)
4702 end = 1;
4703
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004704 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004705 end = 1;
4706 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004707 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004708 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4709 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004710 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004711 flags |= (FIEMAP_EXTENT_DELALLOC |
4712 FIEMAP_EXTENT_UNKNOWN);
Josef Bacikdc046b12014-09-10 16:20:45 -04004713 } else if (fieinfo->fi_extents_max) {
4714 u64 bytenr = em->block_start -
4715 (em->start - em->orig_start);
Liu Bofe09e162013-09-22 12:54:23 +08004716
Liu Bofe09e162013-09-22 12:54:23 +08004717 /*
4718 * As btrfs supports shared space, this information
4719 * can be exported to userspace tools via
Josef Bacikdc046b12014-09-10 16:20:45 -04004720 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4721 * then we're just getting a count and we can skip the
4722 * lookup stuff.
Liu Bofe09e162013-09-22 12:54:23 +08004723 */
Edmund Nadolskibb739cf2017-06-28 21:56:58 -06004724 ret = btrfs_check_shared(root,
4725 btrfs_ino(BTRFS_I(inode)),
David Sterba5911c8f2019-05-15 15:31:04 +02004726 bytenr, roots, tmp_ulist);
Josef Bacikdc046b12014-09-10 16:20:45 -04004727 if (ret < 0)
Liu Bofe09e162013-09-22 12:54:23 +08004728 goto out_free;
Josef Bacikdc046b12014-09-10 16:20:45 -04004729 if (ret)
Liu Bofe09e162013-09-22 12:54:23 +08004730 flags |= FIEMAP_EXTENT_SHARED;
Josef Bacikdc046b12014-09-10 16:20:45 -04004731 ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004732 }
4733 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4734 flags |= FIEMAP_EXTENT_ENCODED;
Josef Bacik0d2b2372015-05-19 10:44:04 -04004735 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4736 flags |= FIEMAP_EXTENT_UNWRITTEN;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004737
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004738 free_extent_map(em);
4739 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05004740 if ((em_start >= last) || em_len == (u64)-1 ||
4741 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004742 flags |= FIEMAP_EXTENT_LAST;
4743 end = 1;
4744 }
4745
Chris Masonec29ed52011-02-23 16:23:20 -05004746 /* now scan forward to see if this is really the last extent. */
David Sterbae3350e12017-06-23 04:09:57 +02004747 em = get_extent_skip_holes(inode, off, last_for_get_extent);
Chris Masonec29ed52011-02-23 16:23:20 -05004748 if (IS_ERR(em)) {
4749 ret = PTR_ERR(em);
4750 goto out;
4751 }
4752 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00004753 flags |= FIEMAP_EXTENT_LAST;
4754 end = 1;
4755 }
Qu Wenruo47518322017-04-07 10:43:15 +08004756 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4757 em_len, flags);
Chengyu Song26e726a2015-03-24 18:12:56 -04004758 if (ret) {
4759 if (ret == 1)
4760 ret = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004761 goto out_free;
Chengyu Song26e726a2015-03-24 18:12:56 -04004762 }
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004763 }
4764out_free:
Qu Wenruo47518322017-04-07 10:43:15 +08004765 if (!ret)
David Sterba5c5aff92019-03-20 11:29:46 +01004766 ret = emit_last_fiemap_cache(fieinfo, &cache);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004767 free_extent_map(em);
4768out:
Liu Bofe09e162013-09-22 12:54:23 +08004769 btrfs_free_path(path);
Liu Boa52f4cd2013-05-01 16:23:41 +00004770 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
David Sterbae43bbe52017-12-12 21:43:52 +01004771 &cached_state);
David Sterba5911c8f2019-05-15 15:31:04 +02004772
4773out_free_ulist:
4774 ulist_free(roots);
4775 ulist_free(tmp_ulist);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004776 return ret;
4777}
4778
Chris Mason727011e2010-08-06 13:21:20 -04004779static void __free_extent_buffer(struct extent_buffer *eb)
4780{
Eric Sandeen6d49ba12013-04-22 16:12:31 +00004781 btrfs_leak_debug_del(&eb->leak_list);
Chris Mason727011e2010-08-06 13:21:20 -04004782 kmem_cache_free(extent_buffer_cache, eb);
4783}
4784
Josef Bacika26e8c92014-03-28 17:07:27 -04004785int extent_buffer_under_io(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004786{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004787 return (atomic_read(&eb->io_pages) ||
4788 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4789 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05004790}
4791
Miao Xie897ca6e92010-10-26 20:57:29 -04004792/*
David Sterba55ac0132018-07-19 17:24:32 +02004793 * Release all pages attached to the extent buffer.
Miao Xie897ca6e92010-10-26 20:57:29 -04004794 */
David Sterba55ac0132018-07-19 17:24:32 +02004795static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
Miao Xie897ca6e92010-10-26 20:57:29 -04004796{
Nikolay Borisovd64766f2018-06-27 16:38:22 +03004797 int i;
4798 int num_pages;
Nikolay Borisovb0132a32018-06-27 16:38:24 +03004799 int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
Miao Xie897ca6e92010-10-26 20:57:29 -04004800
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004801 BUG_ON(extent_buffer_under_io(eb));
Miao Xie897ca6e92010-10-26 20:57:29 -04004802
Nikolay Borisovd64766f2018-06-27 16:38:22 +03004803 num_pages = num_extent_pages(eb);
4804 for (i = 0; i < num_pages; i++) {
4805 struct page *page = eb->pages[i];
Miao Xie897ca6e92010-10-26 20:57:29 -04004806
Forrest Liu5d2361d2015-02-09 17:31:45 +08004807 if (!page)
4808 continue;
4809 if (mapped)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004810 spin_lock(&page->mapping->private_lock);
Forrest Liu5d2361d2015-02-09 17:31:45 +08004811 /*
4812 * We do this since we'll remove the pages after we've
4813 * removed the eb from the radix tree, so we could race
4814 * and have this page now attached to the new eb. So
4815 * only clear page_private if it's still connected to
4816 * this eb.
4817 */
4818 if (PagePrivate(page) &&
4819 page->private == (unsigned long)eb) {
4820 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4821 BUG_ON(PageDirty(page));
4822 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004823 /*
Forrest Liu5d2361d2015-02-09 17:31:45 +08004824 * We need to make sure we haven't be attached
4825 * to a new eb.
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004826 */
Forrest Liu5d2361d2015-02-09 17:31:45 +08004827 ClearPagePrivate(page);
4828 set_page_private(page, 0);
4829 /* One for the page private */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004830 put_page(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004831 }
Forrest Liu5d2361d2015-02-09 17:31:45 +08004832
4833 if (mapped)
4834 spin_unlock(&page->mapping->private_lock);
4835
Nicholas D Steeves01327612016-05-19 21:18:45 -04004836 /* One for when we allocated the page */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004837 put_page(page);
Nikolay Borisovd64766f2018-06-27 16:38:22 +03004838 }
Miao Xie897ca6e92010-10-26 20:57:29 -04004839}
4840
4841/*
4842 * Helper for releasing the extent buffer.
4843 */
4844static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4845{
David Sterba55ac0132018-07-19 17:24:32 +02004846 btrfs_release_extent_buffer_pages(eb);
Miao Xie897ca6e92010-10-26 20:57:29 -04004847 __free_extent_buffer(eb);
4848}
4849
Josef Bacikf28491e2013-12-16 13:24:27 -05004850static struct extent_buffer *
4851__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
David Sterba23d79d82014-06-15 02:55:29 +02004852 unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004853{
4854 struct extent_buffer *eb = NULL;
4855
Michal Hockod1b5c562015-08-19 14:17:40 +02004856 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004857 eb->start = start;
4858 eb->len = len;
Josef Bacikf28491e2013-12-16 13:24:27 -05004859 eb->fs_info = fs_info;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004860 eb->bflags = 0;
4861 rwlock_init(&eb->lock);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004862 atomic_set(&eb->blocking_readers, 0);
David Sterba06297d82019-05-02 16:47:23 +02004863 eb->blocking_writers = 0;
David Sterbaed1b4ed2018-08-24 16:31:17 +02004864 eb->lock_nested = false;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004865 init_waitqueue_head(&eb->write_lock_wq);
4866 init_waitqueue_head(&eb->read_lock_wq);
4867
4868 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4869
4870 spin_lock_init(&eb->refs_lock);
4871 atomic_set(&eb->refs, 1);
4872 atomic_set(&eb->io_pages, 0);
4873
4874 /*
4875 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4876 */
4877 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4878 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4879 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4880
David Sterba843ccf92018-08-24 14:56:28 +02004881#ifdef CONFIG_BTRFS_DEBUG
David Sterbaf3dc24c2019-05-02 16:51:53 +02004882 eb->spinning_writers = 0;
David Sterbaafd495a2018-08-24 15:57:38 +02004883 atomic_set(&eb->spinning_readers, 0);
David Sterba5c9c7992018-08-24 16:15:51 +02004884 atomic_set(&eb->read_locks, 0);
David Sterba00801ae2019-05-02 16:53:47 +02004885 eb->write_locks = 0;
David Sterba843ccf92018-08-24 14:56:28 +02004886#endif
4887
Josef Bacikdb7f3432013-08-07 14:54:37 -04004888 return eb;
4889}
4890
4891struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4892{
David Sterbacc5e31a2018-03-01 18:20:27 +01004893 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004894 struct page *p;
4895 struct extent_buffer *new;
David Sterbacc5e31a2018-03-01 18:20:27 +01004896 int num_pages = num_extent_pages(src);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004897
David Sterba3f556f72014-06-15 03:20:26 +02004898 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004899 if (new == NULL)
4900 return NULL;
4901
4902 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004903 p = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004904 if (!p) {
4905 btrfs_release_extent_buffer(new);
4906 return NULL;
4907 }
4908 attach_extent_buffer_page(new, p);
4909 WARN_ON(PageDirty(p));
4910 SetPageUptodate(p);
4911 new->pages[i] = p;
David Sterbafba1acf2016-11-08 17:56:24 +01004912 copy_page(page_address(p), page_address(src->pages[i]));
Josef Bacikdb7f3432013-08-07 14:54:37 -04004913 }
4914
Josef Bacikdb7f3432013-08-07 14:54:37 -04004915 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
Nikolay Borisovb0132a32018-06-27 16:38:24 +03004916 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004917
4918 return new;
4919}
4920
Omar Sandoval0f331222015-09-29 20:50:31 -07004921struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4922 u64 start, unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004923{
4924 struct extent_buffer *eb;
David Sterbacc5e31a2018-03-01 18:20:27 +01004925 int num_pages;
4926 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004927
David Sterba3f556f72014-06-15 03:20:26 +02004928 eb = __alloc_extent_buffer(fs_info, start, len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004929 if (!eb)
4930 return NULL;
4931
David Sterba65ad0102018-06-29 10:56:49 +02004932 num_pages = num_extent_pages(eb);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004933 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004934 eb->pages[i] = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004935 if (!eb->pages[i])
4936 goto err;
4937 }
4938 set_extent_buffer_uptodate(eb);
4939 btrfs_set_header_nritems(eb, 0);
Nikolay Borisovb0132a32018-06-27 16:38:24 +03004940 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004941
4942 return eb;
4943err:
4944 for (; i > 0; i--)
4945 __free_page(eb->pages[i - 1]);
4946 __free_extent_buffer(eb);
4947 return NULL;
4948}
4949
Omar Sandoval0f331222015-09-29 20:50:31 -07004950struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04004951 u64 start)
Omar Sandoval0f331222015-09-29 20:50:31 -07004952{
Jeff Mahoneyda170662016-06-15 09:22:56 -04004953 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
Omar Sandoval0f331222015-09-29 20:50:31 -07004954}
4955
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004956static void check_buffer_tree_ref(struct extent_buffer *eb)
4957{
Chris Mason242e18c2013-01-29 17:49:37 -05004958 int refs;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004959 /* the ref bit is tricky. We have to make sure it is set
4960 * if we have the buffer dirty. Otherwise the
4961 * code to free a buffer can end up dropping a dirty
4962 * page
4963 *
4964 * Once the ref bit is set, it won't go away while the
4965 * buffer is dirty or in writeback, and it also won't
4966 * go away while we have the reference count on the
4967 * eb bumped.
4968 *
4969 * We can't just set the ref bit without bumping the
4970 * ref on the eb because free_extent_buffer might
4971 * see the ref bit and try to clear it. If this happens
4972 * free_extent_buffer might end up dropping our original
4973 * ref by mistake and freeing the page before we are able
4974 * to add one more ref.
4975 *
4976 * So bump the ref count first, then set the bit. If someone
4977 * beat us to it, drop the ref we added.
4978 */
Chris Mason242e18c2013-01-29 17:49:37 -05004979 refs = atomic_read(&eb->refs);
4980 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4981 return;
4982
Josef Bacik594831c2012-07-20 16:11:08 -04004983 spin_lock(&eb->refs_lock);
4984 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004985 atomic_inc(&eb->refs);
Josef Bacik594831c2012-07-20 16:11:08 -04004986 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004987}
4988
Mel Gorman2457aec2014-06-04 16:10:31 -07004989static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4990 struct page *accessed)
Josef Bacik5df42352012-03-15 18:24:42 -04004991{
David Sterbacc5e31a2018-03-01 18:20:27 +01004992 int num_pages, i;
Josef Bacik5df42352012-03-15 18:24:42 -04004993
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004994 check_buffer_tree_ref(eb);
4995
David Sterba65ad0102018-06-29 10:56:49 +02004996 num_pages = num_extent_pages(eb);
Josef Bacik5df42352012-03-15 18:24:42 -04004997 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02004998 struct page *p = eb->pages[i];
4999
Mel Gorman2457aec2014-06-04 16:10:31 -07005000 if (p != accessed)
5001 mark_page_accessed(p);
Josef Bacik5df42352012-03-15 18:24:42 -04005002 }
5003}
5004
Josef Bacikf28491e2013-12-16 13:24:27 -05005005struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5006 u64 start)
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005007{
5008 struct extent_buffer *eb;
5009
5010 rcu_read_lock();
Josef Bacikf28491e2013-12-16 13:24:27 -05005011 eb = radix_tree_lookup(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005012 start >> PAGE_SHIFT);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005013 if (eb && atomic_inc_not_zero(&eb->refs)) {
5014 rcu_read_unlock();
Filipe Manana062c19e2015-04-23 11:28:48 +01005015 /*
5016 * Lock our eb's refs_lock to avoid races with
5017 * free_extent_buffer. When we get our eb it might be flagged
5018 * with EXTENT_BUFFER_STALE and another task running
5019 * free_extent_buffer might have seen that flag set,
5020 * eb->refs == 2, that the buffer isn't under IO (dirty and
5021 * writeback flags not set) and it's still in the tree (flag
5022 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
5023 * of decrementing the extent buffer's reference count twice.
5024 * So here we could race and increment the eb's reference count,
5025 * clear its stale flag, mark it as dirty and drop our reference
5026 * before the other task finishes executing free_extent_buffer,
5027 * which would later result in an attempt to free an extent
5028 * buffer that is dirty.
5029 */
5030 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5031 spin_lock(&eb->refs_lock);
5032 spin_unlock(&eb->refs_lock);
5033 }
Mel Gorman2457aec2014-06-04 16:10:31 -07005034 mark_extent_buffer_accessed(eb, NULL);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005035 return eb;
5036 }
5037 rcu_read_unlock();
5038
5039 return NULL;
5040}
5041
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005042#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5043struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04005044 u64 start)
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005045{
5046 struct extent_buffer *eb, *exists = NULL;
5047 int ret;
5048
5049 eb = find_extent_buffer(fs_info, start);
5050 if (eb)
5051 return eb;
Jeff Mahoneyda170662016-06-15 09:22:56 -04005052 eb = alloc_dummy_extent_buffer(fs_info, start);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005053 if (!eb)
5054 return NULL;
5055 eb->fs_info = fs_info;
5056again:
David Sterbae1860a72016-05-09 14:11:38 +02005057 ret = radix_tree_preload(GFP_NOFS);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005058 if (ret)
5059 goto free_eb;
5060 spin_lock(&fs_info->buffer_lock);
5061 ret = radix_tree_insert(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005062 start >> PAGE_SHIFT, eb);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005063 spin_unlock(&fs_info->buffer_lock);
5064 radix_tree_preload_end();
5065 if (ret == -EEXIST) {
5066 exists = find_extent_buffer(fs_info, start);
5067 if (exists)
5068 goto free_eb;
5069 else
5070 goto again;
5071 }
5072 check_buffer_tree_ref(eb);
5073 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5074
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005075 return eb;
5076free_eb:
5077 btrfs_release_extent_buffer(eb);
5078 return exists;
5079}
5080#endif
5081
Josef Bacikf28491e2013-12-16 13:24:27 -05005082struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
David Sterbace3e6982014-06-15 03:00:04 +02005083 u64 start)
Chris Masond1310b22008-01-24 16:13:08 -05005084{
Jeff Mahoneyda170662016-06-15 09:22:56 -04005085 unsigned long len = fs_info->nodesize;
David Sterbacc5e31a2018-03-01 18:20:27 +01005086 int num_pages;
5087 int i;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005088 unsigned long index = start >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005089 struct extent_buffer *eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005090 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05005091 struct page *p;
Josef Bacikf28491e2013-12-16 13:24:27 -05005092 struct address_space *mapping = fs_info->btree_inode->i_mapping;
Chris Masond1310b22008-01-24 16:13:08 -05005093 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04005094 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05005095
Jeff Mahoneyda170662016-06-15 09:22:56 -04005096 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
Liu Boc871b0f2016-06-06 12:01:23 -07005097 btrfs_err(fs_info, "bad tree block start %llu", start);
5098 return ERR_PTR(-EINVAL);
5099 }
5100
Josef Bacikf28491e2013-12-16 13:24:27 -05005101 eb = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005102 if (eb)
Chris Mason6af118ce2008-07-22 11:18:07 -04005103 return eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005104
David Sterba23d79d82014-06-15 02:55:29 +02005105 eb = __alloc_extent_buffer(fs_info, start, len);
Peter2b114d12008-04-01 11:21:40 -04005106 if (!eb)
Liu Boc871b0f2016-06-06 12:01:23 -07005107 return ERR_PTR(-ENOMEM);
Chris Masond1310b22008-01-24 16:13:08 -05005108
David Sterba65ad0102018-06-29 10:56:49 +02005109 num_pages = num_extent_pages(eb);
Chris Mason727011e2010-08-06 13:21:20 -04005110 for (i = 0; i < num_pages; i++, index++) {
Michal Hockod1b5c562015-08-19 14:17:40 +02005111 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
Liu Boc871b0f2016-06-06 12:01:23 -07005112 if (!p) {
5113 exists = ERR_PTR(-ENOMEM);
Chris Mason6af118ce2008-07-22 11:18:07 -04005114 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07005115 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005116
5117 spin_lock(&mapping->private_lock);
5118 if (PagePrivate(p)) {
5119 /*
5120 * We could have already allocated an eb for this page
5121 * and attached one so lets see if we can get a ref on
5122 * the existing eb, and if we can we know it's good and
5123 * we can just return that one, else we know we can just
5124 * overwrite page->private.
5125 */
5126 exists = (struct extent_buffer *)p->private;
5127 if (atomic_inc_not_zero(&exists->refs)) {
5128 spin_unlock(&mapping->private_lock);
5129 unlock_page(p);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005130 put_page(p);
Mel Gorman2457aec2014-06-04 16:10:31 -07005131 mark_extent_buffer_accessed(exists, p);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005132 goto free_eb;
5133 }
Omar Sandoval5ca64f42015-02-24 02:47:05 -08005134 exists = NULL;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005135
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005136 /*
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005137 * Do this so attach doesn't complain and we need to
5138 * drop the ref the old guy had.
5139 */
5140 ClearPagePrivate(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005141 WARN_ON(PageDirty(p));
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005142 put_page(p);
Chris Masond1310b22008-01-24 16:13:08 -05005143 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005144 attach_extent_buffer_page(eb, p);
5145 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005146 WARN_ON(PageDirty(p));
Chris Mason727011e2010-08-06 13:21:20 -04005147 eb->pages[i] = p;
Chris Masond1310b22008-01-24 16:13:08 -05005148 if (!PageUptodate(p))
5149 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05005150
5151 /*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03005152 * We can't unlock the pages just yet since the extent buffer
5153 * hasn't been properly inserted in the radix tree, this
5154 * opens a race with btree_releasepage which can free a page
5155 * while we are still filling in all pages for the buffer and
5156 * we could crash.
Chris Masoneb14ab82011-02-10 12:35:00 -05005157 */
Chris Masond1310b22008-01-24 16:13:08 -05005158 }
5159 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05005160 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik115391d2012-03-09 09:51:43 -05005161again:
David Sterbae1860a72016-05-09 14:11:38 +02005162 ret = radix_tree_preload(GFP_NOFS);
Liu Boc871b0f2016-06-06 12:01:23 -07005163 if (ret) {
5164 exists = ERR_PTR(ret);
Miao Xie19fe0a82010-10-26 20:57:29 -04005165 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07005166 }
Miao Xie19fe0a82010-10-26 20:57:29 -04005167
Josef Bacikf28491e2013-12-16 13:24:27 -05005168 spin_lock(&fs_info->buffer_lock);
5169 ret = radix_tree_insert(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005170 start >> PAGE_SHIFT, eb);
Josef Bacikf28491e2013-12-16 13:24:27 -05005171 spin_unlock(&fs_info->buffer_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005172 radix_tree_preload_end();
Miao Xie19fe0a82010-10-26 20:57:29 -04005173 if (ret == -EEXIST) {
Josef Bacikf28491e2013-12-16 13:24:27 -05005174 exists = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005175 if (exists)
5176 goto free_eb;
5177 else
Josef Bacik115391d2012-03-09 09:51:43 -05005178 goto again;
Chris Mason6af118ce2008-07-22 11:18:07 -04005179 }
Chris Mason6af118ce2008-07-22 11:18:07 -04005180 /* add one reference for the tree */
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005181 check_buffer_tree_ref(eb);
Josef Bacik34b41ac2013-12-13 10:41:51 -05005182 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
Chris Masoneb14ab82011-02-10 12:35:00 -05005183
5184 /*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03005185 * Now it's safe to unlock the pages because any calls to
5186 * btree_releasepage will correctly detect that a page belongs to a
5187 * live buffer and won't free them prematurely.
Chris Masoneb14ab82011-02-10 12:35:00 -05005188 */
Nikolay Borisov28187ae2018-07-04 10:24:51 +03005189 for (i = 0; i < num_pages; i++)
5190 unlock_page(eb->pages[i]);
Chris Masond1310b22008-01-24 16:13:08 -05005191 return eb;
5192
Chris Mason6af118ce2008-07-22 11:18:07 -04005193free_eb:
Omar Sandoval5ca64f42015-02-24 02:47:05 -08005194 WARN_ON(!atomic_dec_and_test(&eb->refs));
Chris Mason727011e2010-08-06 13:21:20 -04005195 for (i = 0; i < num_pages; i++) {
5196 if (eb->pages[i])
5197 unlock_page(eb->pages[i]);
5198 }
Chris Masoneb14ab82011-02-10 12:35:00 -05005199
Miao Xie897ca6e92010-10-26 20:57:29 -04005200 btrfs_release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04005201 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05005202}
Chris Masond1310b22008-01-24 16:13:08 -05005203
Josef Bacik3083ee22012-03-09 16:01:49 -05005204static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5205{
5206 struct extent_buffer *eb =
5207 container_of(head, struct extent_buffer, rcu_head);
5208
5209 __free_extent_buffer(eb);
5210}
5211
David Sterbaf7a52a42013-04-26 14:56:29 +00005212static int release_extent_buffer(struct extent_buffer *eb)
Josef Bacik3083ee22012-03-09 16:01:49 -05005213{
Nikolay Borisov07e21c42018-06-27 16:38:23 +03005214 lockdep_assert_held(&eb->refs_lock);
5215
Josef Bacik3083ee22012-03-09 16:01:49 -05005216 WARN_ON(atomic_read(&eb->refs) == 0);
5217 if (atomic_dec_and_test(&eb->refs)) {
Josef Bacik34b41ac2013-12-13 10:41:51 -05005218 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
Josef Bacikf28491e2013-12-16 13:24:27 -05005219 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacik3083ee22012-03-09 16:01:49 -05005220
Jan Schmidt815a51c2012-05-16 17:00:02 +02005221 spin_unlock(&eb->refs_lock);
Josef Bacik3083ee22012-03-09 16:01:49 -05005222
Josef Bacikf28491e2013-12-16 13:24:27 -05005223 spin_lock(&fs_info->buffer_lock);
5224 radix_tree_delete(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005225 eb->start >> PAGE_SHIFT);
Josef Bacikf28491e2013-12-16 13:24:27 -05005226 spin_unlock(&fs_info->buffer_lock);
Josef Bacik34b41ac2013-12-13 10:41:51 -05005227 } else {
5228 spin_unlock(&eb->refs_lock);
Jan Schmidt815a51c2012-05-16 17:00:02 +02005229 }
Josef Bacik3083ee22012-03-09 16:01:49 -05005230
5231 /* Should be safe to release our pages at this point */
David Sterba55ac0132018-07-19 17:24:32 +02005232 btrfs_release_extent_buffer_pages(eb);
Josef Bacikbcb7e442015-03-16 17:38:02 -04005233#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
Nikolay Borisovb0132a32018-06-27 16:38:24 +03005234 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
Josef Bacikbcb7e442015-03-16 17:38:02 -04005235 __free_extent_buffer(eb);
5236 return 1;
5237 }
5238#endif
Josef Bacik3083ee22012-03-09 16:01:49 -05005239 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
Josef Bacike64860a2012-07-20 16:05:36 -04005240 return 1;
Josef Bacik3083ee22012-03-09 16:01:49 -05005241 }
5242 spin_unlock(&eb->refs_lock);
Josef Bacike64860a2012-07-20 16:05:36 -04005243
5244 return 0;
Josef Bacik3083ee22012-03-09 16:01:49 -05005245}
5246
Chris Masond1310b22008-01-24 16:13:08 -05005247void free_extent_buffer(struct extent_buffer *eb)
5248{
Chris Mason242e18c2013-01-29 17:49:37 -05005249 int refs;
5250 int old;
Chris Masond1310b22008-01-24 16:13:08 -05005251 if (!eb)
5252 return;
5253
Chris Mason242e18c2013-01-29 17:49:37 -05005254 while (1) {
5255 refs = atomic_read(&eb->refs);
Nikolay Borisov46cc7752018-10-15 17:04:01 +03005256 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5257 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5258 refs == 1))
Chris Mason242e18c2013-01-29 17:49:37 -05005259 break;
5260 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5261 if (old == refs)
5262 return;
5263 }
5264
Josef Bacik3083ee22012-03-09 16:01:49 -05005265 spin_lock(&eb->refs_lock);
5266 if (atomic_read(&eb->refs) == 2 &&
5267 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005268 !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05005269 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5270 atomic_dec(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05005271
Josef Bacik3083ee22012-03-09 16:01:49 -05005272 /*
5273 * I know this is terrible, but it's temporary until we stop tracking
5274 * the uptodate bits and such for the extent buffers.
5275 */
David Sterbaf7a52a42013-04-26 14:56:29 +00005276 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005277}
Chris Masond1310b22008-01-24 16:13:08 -05005278
Josef Bacik3083ee22012-03-09 16:01:49 -05005279void free_extent_buffer_stale(struct extent_buffer *eb)
5280{
5281 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05005282 return;
5283
Josef Bacik3083ee22012-03-09 16:01:49 -05005284 spin_lock(&eb->refs_lock);
5285 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5286
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005287 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05005288 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5289 atomic_dec(&eb->refs);
David Sterbaf7a52a42013-04-26 14:56:29 +00005290 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005291}
5292
Chris Mason1d4284b2012-03-28 20:31:37 -04005293void clear_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005294{
David Sterbacc5e31a2018-03-01 18:20:27 +01005295 int i;
5296 int num_pages;
Chris Masond1310b22008-01-24 16:13:08 -05005297 struct page *page;
5298
David Sterba65ad0102018-06-29 10:56:49 +02005299 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005300
5301 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005302 page = eb->pages[i];
Chris Masonb9473432009-03-13 11:00:37 -04005303 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05005304 continue;
5305
Chris Masona61e6f22008-07-22 11:18:08 -04005306 lock_page(page);
Chris Masoneb14ab82011-02-10 12:35:00 -05005307 WARN_ON(!PagePrivate(page));
5308
Chris Masond1310b22008-01-24 16:13:08 -05005309 clear_page_dirty_for_io(page);
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07005310 xa_lock_irq(&page->mapping->i_pages);
Matthew Wilcox0a943c62017-12-04 10:37:22 -05005311 if (!PageDirty(page))
5312 __xa_clear_mark(&page->mapping->i_pages,
5313 page_index(page), PAGECACHE_TAG_DIRTY);
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07005314 xa_unlock_irq(&page->mapping->i_pages);
Chris Masonbf0da8c2011-11-04 12:29:37 -04005315 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04005316 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05005317 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005318 WARN_ON(atomic_read(&eb->refs) == 0);
Chris Masond1310b22008-01-24 16:13:08 -05005319}
Chris Masond1310b22008-01-24 16:13:08 -05005320
Liu Boabb57ef2018-09-14 01:44:42 +08005321bool set_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005322{
David Sterbacc5e31a2018-03-01 18:20:27 +01005323 int i;
5324 int num_pages;
Liu Boabb57ef2018-09-14 01:44:42 +08005325 bool was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05005326
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005327 check_buffer_tree_ref(eb);
5328
Chris Masonb9473432009-03-13 11:00:37 -04005329 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005330
David Sterba65ad0102018-06-29 10:56:49 +02005331 num_pages = num_extent_pages(eb);
Josef Bacik3083ee22012-03-09 16:01:49 -05005332 WARN_ON(atomic_read(&eb->refs) == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005333 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5334
Liu Boabb57ef2018-09-14 01:44:42 +08005335 if (!was_dirty)
5336 for (i = 0; i < num_pages; i++)
5337 set_page_dirty(eb->pages[i]);
Liu Bo51995c32018-09-14 01:46:08 +08005338
5339#ifdef CONFIG_BTRFS_DEBUG
5340 for (i = 0; i < num_pages; i++)
5341 ASSERT(PageDirty(eb->pages[i]));
5342#endif
5343
Chris Masonb9473432009-03-13 11:00:37 -04005344 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05005345}
Chris Masond1310b22008-01-24 16:13:08 -05005346
David Sterba69ba3922015-12-03 13:08:59 +01005347void clear_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Mason1259ab72008-05-12 13:39:03 -04005348{
David Sterbacc5e31a2018-03-01 18:20:27 +01005349 int i;
Chris Mason1259ab72008-05-12 13:39:03 -04005350 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01005351 int num_pages;
Chris Mason1259ab72008-05-12 13:39:03 -04005352
Chris Masonb4ce94d2009-02-04 09:25:08 -05005353 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02005354 num_pages = num_extent_pages(eb);
Chris Mason1259ab72008-05-12 13:39:03 -04005355 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005356 page = eb->pages[i];
Chris Mason33958dc2008-07-30 10:29:12 -04005357 if (page)
5358 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04005359 }
Chris Mason1259ab72008-05-12 13:39:03 -04005360}
5361
David Sterba09c25a82015-12-03 13:08:59 +01005362void set_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005363{
David Sterbacc5e31a2018-03-01 18:20:27 +01005364 int i;
Chris Masond1310b22008-01-24 16:13:08 -05005365 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01005366 int num_pages;
Chris Masond1310b22008-01-24 16:13:08 -05005367
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005368 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02005369 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005370 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005371 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005372 SetPageUptodate(page);
5373 }
Chris Masond1310b22008-01-24 16:13:08 -05005374}
Chris Masond1310b22008-01-24 16:13:08 -05005375
Nikolay Borisovc2ccfbc2019-04-10 17:24:40 +03005376int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05005377{
David Sterbacc5e31a2018-03-01 18:20:27 +01005378 int i;
Chris Masond1310b22008-01-24 16:13:08 -05005379 struct page *page;
5380 int err;
5381 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04005382 int locked_pages = 0;
5383 int all_uptodate = 1;
David Sterbacc5e31a2018-03-01 18:20:27 +01005384 int num_pages;
Chris Mason727011e2010-08-06 13:21:20 -04005385 unsigned long num_reads = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05005386 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04005387 unsigned long bio_flags = 0;
Nikolay Borisovc2ccfbc2019-04-10 17:24:40 +03005388 struct extent_io_tree *tree = &BTRFS_I(eb->fs_info->btree_inode)->io_tree;
Chris Masona86c12c2008-02-07 10:50:54 -05005389
Chris Masonb4ce94d2009-02-04 09:25:08 -05005390 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05005391 return 0;
5392
David Sterba65ad0102018-06-29 10:56:49 +02005393 num_pages = num_extent_pages(eb);
Josef Bacik8436ea912016-09-02 15:40:03 -04005394 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005395 page = eb->pages[i];
Arne Jansenbb82ab82011-06-10 14:06:53 +02005396 if (wait == WAIT_NONE) {
David Woodhouse2db04962008-08-07 11:19:43 -04005397 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04005398 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05005399 } else {
5400 lock_page(page);
5401 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005402 locked_pages++;
Liu Bo2571e732016-08-03 12:33:01 -07005403 }
5404 /*
5405 * We need to firstly lock all pages to make sure that
5406 * the uptodate bit of our pages won't be affected by
5407 * clear_extent_buffer_uptodate().
5408 */
Josef Bacik8436ea912016-09-02 15:40:03 -04005409 for (i = 0; i < num_pages; i++) {
Liu Bo2571e732016-08-03 12:33:01 -07005410 page = eb->pages[i];
Chris Mason727011e2010-08-06 13:21:20 -04005411 if (!PageUptodate(page)) {
5412 num_reads++;
Chris Masonce9adaa2008-04-09 16:28:12 -04005413 all_uptodate = 0;
Chris Mason727011e2010-08-06 13:21:20 -04005414 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005415 }
Liu Bo2571e732016-08-03 12:33:01 -07005416
Chris Masonce9adaa2008-04-09 16:28:12 -04005417 if (all_uptodate) {
Josef Bacik8436ea912016-09-02 15:40:03 -04005418 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04005419 goto unlock_exit;
5420 }
5421
Filipe Manana656f30d2014-09-26 12:25:56 +01005422 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
Josef Bacik5cf1ab52012-04-16 09:42:26 -04005423 eb->read_mirror = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005424 atomic_set(&eb->io_pages, num_reads);
Josef Bacik8436ea912016-09-02 15:40:03 -04005425 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005426 page = eb->pages[i];
Liu Bobaf863b2016-07-11 10:39:07 -07005427
Chris Masonce9adaa2008-04-09 16:28:12 -04005428 if (!PageUptodate(page)) {
Liu Bobaf863b2016-07-11 10:39:07 -07005429 if (ret) {
5430 atomic_dec(&eb->io_pages);
5431 unlock_page(page);
5432 continue;
5433 }
5434
Chris Masonf1885912008-04-09 16:28:12 -04005435 ClearPageError(page);
Chris Masona86c12c2008-02-07 10:50:54 -05005436 err = __extent_read_full_page(tree, page,
David Sterba6af49db2017-06-23 04:09:57 +02005437 btree_get_extent, &bio,
Josef Bacikd4c7ca82013-04-19 19:49:09 -04005438 mirror_num, &bio_flags,
Mike Christie1f7ad752016-06-05 14:31:51 -05005439 REQ_META);
Liu Bobaf863b2016-07-11 10:39:07 -07005440 if (err) {
Chris Masond1310b22008-01-24 16:13:08 -05005441 ret = err;
Liu Bobaf863b2016-07-11 10:39:07 -07005442 /*
5443 * We use &bio in above __extent_read_full_page,
5444 * so we ensure that if it returns error, the
5445 * current page fails to add itself to bio and
5446 * it's been unlocked.
5447 *
5448 * We must dec io_pages by ourselves.
5449 */
5450 atomic_dec(&eb->io_pages);
5451 }
Chris Masond1310b22008-01-24 16:13:08 -05005452 } else {
5453 unlock_page(page);
5454 }
5455 }
5456
Jeff Mahoney355808c2011-10-03 23:23:14 -04005457 if (bio) {
Mike Christie1f7ad752016-06-05 14:31:51 -05005458 err = submit_one_bio(bio, mirror_num, bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01005459 if (err)
5460 return err;
Jeff Mahoney355808c2011-10-03 23:23:14 -04005461 }
Chris Masona86c12c2008-02-07 10:50:54 -05005462
Arne Jansenbb82ab82011-06-10 14:06:53 +02005463 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05005464 return ret;
Chris Masond3977122009-01-05 21:25:51 -05005465
Josef Bacik8436ea912016-09-02 15:40:03 -04005466 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005467 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005468 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05005469 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05005470 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05005471 }
Chris Masond3977122009-01-05 21:25:51 -05005472
Chris Masond1310b22008-01-24 16:13:08 -05005473 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04005474
5475unlock_exit:
Chris Masond3977122009-01-05 21:25:51 -05005476 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04005477 locked_pages--;
Josef Bacik8436ea912016-09-02 15:40:03 -04005478 page = eb->pages[locked_pages];
5479 unlock_page(page);
Chris Masonce9adaa2008-04-09 16:28:12 -04005480 }
5481 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05005482}
Chris Masond1310b22008-01-24 16:13:08 -05005483
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005484void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5485 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05005486{
5487 size_t cur;
5488 size_t offset;
5489 struct page *page;
5490 char *kaddr;
5491 char *dst = (char *)dstv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005492 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005493 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005494
Liu Bof716abd2017-08-09 11:10:16 -06005495 if (start + len > eb->len) {
5496 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5497 eb->start, eb->len, start, len);
5498 memset(dst, 0, len);
5499 return;
5500 }
Chris Masond1310b22008-01-24 16:13:08 -05005501
Johannes Thumshirn70730172018-12-05 15:23:03 +01005502 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005503
Chris Masond3977122009-01-05 21:25:51 -05005504 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005505 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005506
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005507 cur = min(len, (PAGE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04005508 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005509 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005510
5511 dst += cur;
5512 len -= cur;
5513 offset = 0;
5514 i++;
5515 }
5516}
Chris Masond1310b22008-01-24 16:13:08 -05005517
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005518int read_extent_buffer_to_user(const struct extent_buffer *eb,
5519 void __user *dstv,
5520 unsigned long start, unsigned long len)
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005521{
5522 size_t cur;
5523 size_t offset;
5524 struct page *page;
5525 char *kaddr;
5526 char __user *dst = (char __user *)dstv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005527 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005528 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005529 int ret = 0;
5530
5531 WARN_ON(start > eb->len);
5532 WARN_ON(start + len > eb->start + eb->len);
5533
Johannes Thumshirn70730172018-12-05 15:23:03 +01005534 offset = offset_in_page(start_offset + start);
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005535
5536 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005537 page = eb->pages[i];
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005538
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005539 cur = min(len, (PAGE_SIZE - offset));
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005540 kaddr = page_address(page);
5541 if (copy_to_user(dst, kaddr + offset, cur)) {
5542 ret = -EFAULT;
5543 break;
5544 }
5545
5546 dst += cur;
5547 len -= cur;
5548 offset = 0;
5549 i++;
5550 }
5551
5552 return ret;
5553}
5554
Liu Bo415b35a2016-06-17 19:16:21 -07005555/*
5556 * return 0 if the item is found within a page.
5557 * return 1 if the item spans two pages.
5558 * return -EINVAL otherwise.
5559 */
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005560int map_private_extent_buffer(const struct extent_buffer *eb,
5561 unsigned long start, unsigned long min_len,
5562 char **map, unsigned long *map_start,
5563 unsigned long *map_len)
Chris Masond1310b22008-01-24 16:13:08 -05005564{
Johannes Thumshirncc2c39d2018-11-28 09:54:54 +01005565 size_t offset;
Chris Masond1310b22008-01-24 16:13:08 -05005566 char *kaddr;
5567 struct page *p;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005568 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005569 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005570 unsigned long end_i = (start_offset + start + min_len - 1) >>
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005571 PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005572
Liu Bof716abd2017-08-09 11:10:16 -06005573 if (start + min_len > eb->len) {
5574 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5575 eb->start, eb->len, start, min_len);
5576 return -EINVAL;
5577 }
5578
Chris Masond1310b22008-01-24 16:13:08 -05005579 if (i != end_i)
Liu Bo415b35a2016-06-17 19:16:21 -07005580 return 1;
Chris Masond1310b22008-01-24 16:13:08 -05005581
5582 if (i == 0) {
5583 offset = start_offset;
5584 *map_start = 0;
5585 } else {
5586 offset = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005587 *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
Chris Masond1310b22008-01-24 16:13:08 -05005588 }
Chris Masond3977122009-01-05 21:25:51 -05005589
David Sterbafb85fc92014-07-31 01:03:53 +02005590 p = eb->pages[i];
Chris Masona6591712011-07-19 12:04:14 -04005591 kaddr = page_address(p);
Chris Masond1310b22008-01-24 16:13:08 -05005592 *map = kaddr + offset;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005593 *map_len = PAGE_SIZE - offset;
Chris Masond1310b22008-01-24 16:13:08 -05005594 return 0;
5595}
Chris Masond1310b22008-01-24 16:13:08 -05005596
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005597int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5598 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05005599{
5600 size_t cur;
5601 size_t offset;
5602 struct page *page;
5603 char *kaddr;
5604 char *ptr = (char *)ptrv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005605 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005606 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005607 int ret = 0;
5608
5609 WARN_ON(start > eb->len);
5610 WARN_ON(start + len > eb->start + eb->len);
5611
Johannes Thumshirn70730172018-12-05 15:23:03 +01005612 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005613
Chris Masond3977122009-01-05 21:25:51 -05005614 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005615 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005616
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005617 cur = min(len, (PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05005618
Chris Masona6591712011-07-19 12:04:14 -04005619 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005620 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005621 if (ret)
5622 break;
5623
5624 ptr += cur;
5625 len -= cur;
5626 offset = 0;
5627 i++;
5628 }
5629 return ret;
5630}
Chris Masond1310b22008-01-24 16:13:08 -05005631
David Sterbaf157bf72016-11-09 17:43:38 +01005632void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
5633 const void *srcv)
5634{
5635 char *kaddr;
5636
5637 WARN_ON(!PageUptodate(eb->pages[0]));
5638 kaddr = page_address(eb->pages[0]);
5639 memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5640 BTRFS_FSID_SIZE);
5641}
5642
5643void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
5644{
5645 char *kaddr;
5646
5647 WARN_ON(!PageUptodate(eb->pages[0]));
5648 kaddr = page_address(eb->pages[0]);
5649 memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5650 BTRFS_FSID_SIZE);
5651}
5652
Chris Masond1310b22008-01-24 16:13:08 -05005653void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5654 unsigned long start, unsigned long len)
5655{
5656 size_t cur;
5657 size_t offset;
5658 struct page *page;
5659 char *kaddr;
5660 char *src = (char *)srcv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005661 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005662 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005663
5664 WARN_ON(start > eb->len);
5665 WARN_ON(start + len > eb->start + eb->len);
5666
Johannes Thumshirn70730172018-12-05 15:23:03 +01005667 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005668
Chris Masond3977122009-01-05 21:25:51 -05005669 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005670 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005671 WARN_ON(!PageUptodate(page));
5672
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005673 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005674 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005675 memcpy(kaddr + offset, src, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005676
5677 src += cur;
5678 len -= cur;
5679 offset = 0;
5680 i++;
5681 }
5682}
Chris Masond1310b22008-01-24 16:13:08 -05005683
David Sterbab159fa22016-11-08 18:09:03 +01005684void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
5685 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05005686{
5687 size_t cur;
5688 size_t offset;
5689 struct page *page;
5690 char *kaddr;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005691 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005692 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005693
5694 WARN_ON(start > eb->len);
5695 WARN_ON(start + len > eb->start + eb->len);
5696
Johannes Thumshirn70730172018-12-05 15:23:03 +01005697 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005698
Chris Masond3977122009-01-05 21:25:51 -05005699 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005700 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005701 WARN_ON(!PageUptodate(page));
5702
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005703 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005704 kaddr = page_address(page);
David Sterbab159fa22016-11-08 18:09:03 +01005705 memset(kaddr + offset, 0, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005706
5707 len -= cur;
5708 offset = 0;
5709 i++;
5710 }
5711}
Chris Masond1310b22008-01-24 16:13:08 -05005712
David Sterba58e80122016-11-08 18:30:31 +01005713void copy_extent_buffer_full(struct extent_buffer *dst,
5714 struct extent_buffer *src)
5715{
5716 int i;
David Sterbacc5e31a2018-03-01 18:20:27 +01005717 int num_pages;
David Sterba58e80122016-11-08 18:30:31 +01005718
5719 ASSERT(dst->len == src->len);
5720
David Sterba65ad0102018-06-29 10:56:49 +02005721 num_pages = num_extent_pages(dst);
David Sterba58e80122016-11-08 18:30:31 +01005722 for (i = 0; i < num_pages; i++)
5723 copy_page(page_address(dst->pages[i]),
5724 page_address(src->pages[i]));
5725}
5726
Chris Masond1310b22008-01-24 16:13:08 -05005727void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5728 unsigned long dst_offset, unsigned long src_offset,
5729 unsigned long len)
5730{
5731 u64 dst_len = dst->len;
5732 size_t cur;
5733 size_t offset;
5734 struct page *page;
5735 char *kaddr;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005736 size_t start_offset = offset_in_page(dst->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005737 unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005738
5739 WARN_ON(src->len != dst_len);
5740
Johannes Thumshirn70730172018-12-05 15:23:03 +01005741 offset = offset_in_page(start_offset + dst_offset);
Chris Masond1310b22008-01-24 16:13:08 -05005742
Chris Masond3977122009-01-05 21:25:51 -05005743 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005744 page = dst->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005745 WARN_ON(!PageUptodate(page));
5746
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005747 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05005748
Chris Masona6591712011-07-19 12:04:14 -04005749 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005750 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005751
5752 src_offset += cur;
5753 len -= cur;
5754 offset = 0;
5755 i++;
5756 }
5757}
Chris Masond1310b22008-01-24 16:13:08 -05005758
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005759/*
5760 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5761 * given bit number
5762 * @eb: the extent buffer
5763 * @start: offset of the bitmap item in the extent buffer
5764 * @nr: bit number
5765 * @page_index: return index of the page in the extent buffer that contains the
5766 * given bit number
5767 * @page_offset: return offset into the page given by page_index
5768 *
5769 * This helper hides the ugliness of finding the byte in an extent buffer which
5770 * contains a given bit.
5771 */
5772static inline void eb_bitmap_offset(struct extent_buffer *eb,
5773 unsigned long start, unsigned long nr,
5774 unsigned long *page_index,
5775 size_t *page_offset)
5776{
Johannes Thumshirn70730172018-12-05 15:23:03 +01005777 size_t start_offset = offset_in_page(eb->start);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005778 size_t byte_offset = BIT_BYTE(nr);
5779 size_t offset;
5780
5781 /*
5782 * The byte we want is the offset of the extent buffer + the offset of
5783 * the bitmap item in the extent buffer + the offset of the byte in the
5784 * bitmap item.
5785 */
5786 offset = start_offset + start + byte_offset;
5787
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005788 *page_index = offset >> PAGE_SHIFT;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005789 *page_offset = offset_in_page(offset);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005790}
5791
5792/**
5793 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5794 * @eb: the extent buffer
5795 * @start: offset of the bitmap item in the extent buffer
5796 * @nr: bit number to test
5797 */
5798int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5799 unsigned long nr)
5800{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07005801 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005802 struct page *page;
5803 unsigned long i;
5804 size_t offset;
5805
5806 eb_bitmap_offset(eb, start, nr, &i, &offset);
5807 page = eb->pages[i];
5808 WARN_ON(!PageUptodate(page));
5809 kaddr = page_address(page);
5810 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5811}
5812
5813/**
5814 * extent_buffer_bitmap_set - set an area of a bitmap
5815 * @eb: the extent buffer
5816 * @start: offset of the bitmap item in the extent buffer
5817 * @pos: bit number of the first bit
5818 * @len: number of bits to set
5819 */
5820void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5821 unsigned long pos, unsigned long len)
5822{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07005823 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005824 struct page *page;
5825 unsigned long i;
5826 size_t offset;
5827 const unsigned int size = pos + len;
5828 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
Omar Sandoval2fe1d552016-09-22 17:24:20 -07005829 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005830
5831 eb_bitmap_offset(eb, start, pos, &i, &offset);
5832 page = eb->pages[i];
5833 WARN_ON(!PageUptodate(page));
5834 kaddr = page_address(page);
5835
5836 while (len >= bits_to_set) {
5837 kaddr[offset] |= mask_to_set;
5838 len -= bits_to_set;
5839 bits_to_set = BITS_PER_BYTE;
Dan Carpenter9c894692016-10-12 11:33:21 +03005840 mask_to_set = ~0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005841 if (++offset >= PAGE_SIZE && len > 0) {
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005842 offset = 0;
5843 page = eb->pages[++i];
5844 WARN_ON(!PageUptodate(page));
5845 kaddr = page_address(page);
5846 }
5847 }
5848 if (len) {
5849 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5850 kaddr[offset] |= mask_to_set;
5851 }
5852}
5853
5854
5855/**
5856 * extent_buffer_bitmap_clear - clear an area of a bitmap
5857 * @eb: the extent buffer
5858 * @start: offset of the bitmap item in the extent buffer
5859 * @pos: bit number of the first bit
5860 * @len: number of bits to clear
5861 */
5862void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5863 unsigned long pos, unsigned long len)
5864{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07005865 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005866 struct page *page;
5867 unsigned long i;
5868 size_t offset;
5869 const unsigned int size = pos + len;
5870 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
Omar Sandoval2fe1d552016-09-22 17:24:20 -07005871 u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005872
5873 eb_bitmap_offset(eb, start, pos, &i, &offset);
5874 page = eb->pages[i];
5875 WARN_ON(!PageUptodate(page));
5876 kaddr = page_address(page);
5877
5878 while (len >= bits_to_clear) {
5879 kaddr[offset] &= ~mask_to_clear;
5880 len -= bits_to_clear;
5881 bits_to_clear = BITS_PER_BYTE;
Dan Carpenter9c894692016-10-12 11:33:21 +03005882 mask_to_clear = ~0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005883 if (++offset >= PAGE_SIZE && len > 0) {
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005884 offset = 0;
5885 page = eb->pages[++i];
5886 WARN_ON(!PageUptodate(page));
5887 kaddr = page_address(page);
5888 }
5889 }
5890 if (len) {
5891 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5892 kaddr[offset] &= ~mask_to_clear;
5893 }
5894}
5895
Sergei Trofimovich33872062011-04-11 21:52:52 +00005896static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5897{
5898 unsigned long distance = (src > dst) ? src - dst : dst - src;
5899 return distance < len;
5900}
5901
Chris Masond1310b22008-01-24 16:13:08 -05005902static void copy_pages(struct page *dst_page, struct page *src_page,
5903 unsigned long dst_off, unsigned long src_off,
5904 unsigned long len)
5905{
Chris Masona6591712011-07-19 12:04:14 -04005906 char *dst_kaddr = page_address(dst_page);
Chris Masond1310b22008-01-24 16:13:08 -05005907 char *src_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04005908 int must_memmove = 0;
Chris Masond1310b22008-01-24 16:13:08 -05005909
Sergei Trofimovich33872062011-04-11 21:52:52 +00005910 if (dst_page != src_page) {
Chris Masona6591712011-07-19 12:04:14 -04005911 src_kaddr = page_address(src_page);
Sergei Trofimovich33872062011-04-11 21:52:52 +00005912 } else {
Chris Masond1310b22008-01-24 16:13:08 -05005913 src_kaddr = dst_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04005914 if (areas_overlap(src_off, dst_off, len))
5915 must_memmove = 1;
Sergei Trofimovich33872062011-04-11 21:52:52 +00005916 }
Chris Masond1310b22008-01-24 16:13:08 -05005917
Chris Mason727011e2010-08-06 13:21:20 -04005918 if (must_memmove)
5919 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5920 else
5921 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
Chris Masond1310b22008-01-24 16:13:08 -05005922}
5923
5924void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5925 unsigned long src_offset, unsigned long len)
5926{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04005927 struct btrfs_fs_info *fs_info = dst->fs_info;
Chris Masond1310b22008-01-24 16:13:08 -05005928 size_t cur;
5929 size_t dst_off_in_page;
5930 size_t src_off_in_page;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005931 size_t start_offset = offset_in_page(dst->start);
Chris Masond1310b22008-01-24 16:13:08 -05005932 unsigned long dst_i;
5933 unsigned long src_i;
5934
5935 if (src_offset + len > dst->len) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04005936 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04005937 "memmove bogus src_offset %lu move len %lu dst len %lu",
5938 src_offset, len, dst->len);
Arnd Bergmann290342f2019-03-25 14:02:25 +01005939 BUG();
Chris Masond1310b22008-01-24 16:13:08 -05005940 }
5941 if (dst_offset + len > dst->len) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04005942 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04005943 "memmove bogus dst_offset %lu move len %lu dst len %lu",
5944 dst_offset, len, dst->len);
Arnd Bergmann290342f2019-03-25 14:02:25 +01005945 BUG();
Chris Masond1310b22008-01-24 16:13:08 -05005946 }
5947
Chris Masond3977122009-01-05 21:25:51 -05005948 while (len > 0) {
Johannes Thumshirn70730172018-12-05 15:23:03 +01005949 dst_off_in_page = offset_in_page(start_offset + dst_offset);
5950 src_off_in_page = offset_in_page(start_offset + src_offset);
Chris Masond1310b22008-01-24 16:13:08 -05005951
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005952 dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5953 src_i = (start_offset + src_offset) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005954
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005955 cur = min(len, (unsigned long)(PAGE_SIZE -
Chris Masond1310b22008-01-24 16:13:08 -05005956 src_off_in_page));
5957 cur = min_t(unsigned long, cur,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005958 (unsigned long)(PAGE_SIZE - dst_off_in_page));
Chris Masond1310b22008-01-24 16:13:08 -05005959
David Sterbafb85fc92014-07-31 01:03:53 +02005960 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05005961 dst_off_in_page, src_off_in_page, cur);
5962
5963 src_offset += cur;
5964 dst_offset += cur;
5965 len -= cur;
5966 }
5967}
Chris Masond1310b22008-01-24 16:13:08 -05005968
5969void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5970 unsigned long src_offset, unsigned long len)
5971{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04005972 struct btrfs_fs_info *fs_info = dst->fs_info;
Chris Masond1310b22008-01-24 16:13:08 -05005973 size_t cur;
5974 size_t dst_off_in_page;
5975 size_t src_off_in_page;
5976 unsigned long dst_end = dst_offset + len - 1;
5977 unsigned long src_end = src_offset + len - 1;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005978 size_t start_offset = offset_in_page(dst->start);
Chris Masond1310b22008-01-24 16:13:08 -05005979 unsigned long dst_i;
5980 unsigned long src_i;
5981
5982 if (src_offset + len > dst->len) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04005983 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04005984 "memmove bogus src_offset %lu move len %lu len %lu",
5985 src_offset, len, dst->len);
Arnd Bergmann290342f2019-03-25 14:02:25 +01005986 BUG();
Chris Masond1310b22008-01-24 16:13:08 -05005987 }
5988 if (dst_offset + len > dst->len) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04005989 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04005990 "memmove bogus dst_offset %lu move len %lu len %lu",
5991 dst_offset, len, dst->len);
Arnd Bergmann290342f2019-03-25 14:02:25 +01005992 BUG();
Chris Masond1310b22008-01-24 16:13:08 -05005993 }
Chris Mason727011e2010-08-06 13:21:20 -04005994 if (dst_offset < src_offset) {
Chris Masond1310b22008-01-24 16:13:08 -05005995 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5996 return;
5997 }
Chris Masond3977122009-01-05 21:25:51 -05005998 while (len > 0) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005999 dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
6000 src_i = (start_offset + src_end) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05006001
Johannes Thumshirn70730172018-12-05 15:23:03 +01006002 dst_off_in_page = offset_in_page(start_offset + dst_end);
6003 src_off_in_page = offset_in_page(start_offset + src_end);
Chris Masond1310b22008-01-24 16:13:08 -05006004
6005 cur = min_t(unsigned long, len, src_off_in_page + 1);
6006 cur = min(cur, dst_off_in_page + 1);
David Sterbafb85fc92014-07-31 01:03:53 +02006007 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05006008 dst_off_in_page - cur + 1,
6009 src_off_in_page - cur + 1, cur);
6010
6011 dst_end -= cur;
6012 src_end -= cur;
6013 len -= cur;
6014 }
6015}
Chris Mason6af118ce2008-07-22 11:18:07 -04006016
David Sterbaf7a52a42013-04-26 14:56:29 +00006017int try_release_extent_buffer(struct page *page)
Miao Xie19fe0a82010-10-26 20:57:29 -04006018{
Chris Mason6af118ce2008-07-22 11:18:07 -04006019 struct extent_buffer *eb;
Miao Xie897ca6e92010-10-26 20:57:29 -04006020
Miao Xie19fe0a82010-10-26 20:57:29 -04006021 /*
Nicholas D Steeves01327612016-05-19 21:18:45 -04006022 * We need to make sure nobody is attaching this page to an eb right
Josef Bacik3083ee22012-03-09 16:01:49 -05006023 * now.
Miao Xie19fe0a82010-10-26 20:57:29 -04006024 */
Josef Bacik3083ee22012-03-09 16:01:49 -05006025 spin_lock(&page->mapping->private_lock);
6026 if (!PagePrivate(page)) {
6027 spin_unlock(&page->mapping->private_lock);
6028 return 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04006029 }
6030
Josef Bacik3083ee22012-03-09 16:01:49 -05006031 eb = (struct extent_buffer *)page->private;
6032 BUG_ON(!eb);
Miao Xie19fe0a82010-10-26 20:57:29 -04006033
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006034 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05006035 * This is a little awful but should be ok, we need to make sure that
6036 * the eb doesn't disappear out from under us while we're looking at
6037 * this page.
6038 */
6039 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006040 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
Josef Bacik3083ee22012-03-09 16:01:49 -05006041 spin_unlock(&eb->refs_lock);
6042 spin_unlock(&page->mapping->private_lock);
6043 return 0;
6044 }
6045 spin_unlock(&page->mapping->private_lock);
6046
Josef Bacik3083ee22012-03-09 16:01:49 -05006047 /*
6048 * If tree ref isn't set then we know the ref on this eb is a real ref,
6049 * so just return, this page will likely be freed soon anyway.
6050 */
6051 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6052 spin_unlock(&eb->refs_lock);
6053 return 0;
6054 }
Josef Bacik3083ee22012-03-09 16:01:49 -05006055
David Sterbaf7a52a42013-04-26 14:56:29 +00006056 return release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04006057}