// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add(spinlock_t *lock,
					struct list_head *new,
					struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(lock, flags);
}

static inline void btrfs_leak_debug_del(spinlock_t *lock,
					struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)	\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	u64 isize;

	if (!inode || !is_data_inode(inode))
		return;

	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
#define btrfs_extent_state_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use REQ_SYNC */
	unsigned int sync_io:1;
};

static int add_extent_changeset(struct extent_state *state, unsigned bits,
				struct extent_changeset *changeset,
				int set)
{
	int ret;

	if (!changeset)
		return 0;
	if (set && (state->state & bits) == bits)
		return 0;
	if (!set && (state->state & bits) == 0)
		return 0;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	return ret;
}

static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
				       unsigned long bio_flags)
{
	blk_status_t ret = 0;
	struct extent_io_tree *tree = bio->bi_private;

	bio->bi_private = NULL;

	if (tree->ops)
		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
						 mirror_num, bio_flags);
	else
		btrfsic_submit_bio(bio);

	return blk_status_to_errno(ret);
}

/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
	if (epd->bio) {
		epd->bio->bi_status = errno_to_blk_status(ret);
		bio_endio(epd->bio);
		epd->bio = NULL;
	}
}

/*
 * Submit bio from extent page data via submit_one_bio
 *
 * Return 0 if everything is OK.
 * Return <0 for error.
 */
static int __must_check flush_write_bio(struct extent_page_data *epd)
{
	int ret = 0;

	if (epd->bio) {
		ret = submit_one_bio(epd->bio, 0, 0);
		/*
		 * Clean up of epd->bio is handled by its endio function.
		 * And endio is either triggered by successful bio execution
		 * or the error handler of submit bio hook.
		 * So at this point, no matter what happened, we don't need
		 * to clean up epd->bio.
		 */
		epd->bio = NULL;
	}
	return ret;
}

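/*
 * Illustrative sketch (not part of the upstream file): how a write path that
 * has been accumulating pages into epd->bio would typically finish. On error
 * the pending bio is failed in place, otherwise it is pushed to the block
 * layer. The helper name and the exact policy are assumptions made for the
 * example only.
 */
static __maybe_unused int example_finish_write(struct extent_page_data *epd,
					       int ret)
{
	if (ret) {
		/* Fail the not yet submitted bio with the error we have. */
		end_write_bio(epd, ret);
		return ret;
	}
	/* Submit whatever is still queued in epd->bio. */
	return flush_write_bio(epd);
}
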
int __init extent_state_cache_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;
	return 0;
}

int __init extent_io_init(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_io_bio, bio),
			BIOSET_NEED_BVECS))
		goto free_buffer_cache;

	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_exit(&btrfs_bioset);

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;
	return -ENOMEM;
}

void __cold extent_state_cache_exit(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

void __cold extent_io_exit(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
	bioset_exit(&btrfs_bioset);
}

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up
 * and update the disk_i_size, but lockdep will complain because elsewhere,
 * for the io_tree, we hold the tree lock and then take the inode lock when
 * setting delalloc. These two things are unrelated, so make a separate
 * lockdep class for the file_extent_tree so we don't get the two locking
 * patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

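/*
 * Illustrative sketch (not part of the upstream file): a hypothetical caller
 * initializing the per-inode file extent tree. IO_TREE_INODE_FILE_EXTENT is
 * the owner that picks up the separate lockdep class above; the field names
 * used here follow struct btrfs_inode but are shown only as an example.
 */
static __maybe_unused void example_init_file_extent_tree(
						struct btrfs_inode *inode)
{
	extent_io_tree_init(inode->root->fs_info, &inode->file_extent_tree,
			    IO_TREE_INODE_FILE_EXTENT, &inode->vfs_inode);
}
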
void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might not be appropriate for the slab allocator,
	 * drop the unsupported bits.
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

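/*
 * Illustrative sketch (not part of the upstream file): extent_state lifetime
 * is reference counted, so a cached pointer is dropped with
 * free_extent_state() rather than freed directly; only the final put returns
 * the object to the slab cache.
 */
static __maybe_unused void example_state_refcounting(void)
{
	struct extent_state *state = alloc_extent_state(GFP_NOFS);

	if (!state)
		return;
	refcount_inc(&state->refs);	/* e.g. stashed in *cached_state */
	free_extent_state(state);	/* drops the cached reference */
	free_extent_state(state);	/* last put, frees the object */
}
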
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/**
 * __etree_search - search @tree for an entry that contains @offset. Such an
 * entry would have entry->start <= offset && entry->end >= offset.
 *
 * @tree - the tree to search
 * @offset - offset that should fall within an entry in @tree
 * @next_ret - pointer to the first entry whose range ends after @offset
 * @prev_ret - pointer to the first entry whose range begins before @offset
 * @p_ret - pointer where new node should be anchored (used when inserting an
 *	    entry in the tree)
 * @parent_ret - points to entry which would have been the parent of the entry,
 *		 containing @offset
 *
 * This function returns a pointer to the entry that contains @offset byte
 * address. If no such entry exists, then NULL is returned and the other
 * pointer arguments to the function are filled, otherwise the found entry is
 * returned and the other pointers are left untouched.
 */
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **next_ret,
				      struct rb_node **prev_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (next_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
		prev = orig_prev;
	}

	if (prev_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
	}
	return NULL;
}

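/*
 * Illustrative sketch (not part of the upstream file): how the search
 * contract above is typically consumed. With the tree lock held, a hit
 * returns the node of the containing entry; on a miss, NULL is returned and
 * the @next_ret cursor (when supplied) points at the first entry that ends
 * after @offset. The helper name is an assumption for the example.
 */
static __maybe_unused struct extent_state *example_find_containing(
				struct extent_io_tree *tree, u64 offset)
{
	struct rb_node *next = NULL;
	struct rb_node *node;

	lockdep_assert_held(&tree->lock);
	node = __etree_search(tree, offset, &next, NULL, NULL, NULL);
	if (!node)
		return NULL;	/* caller may fall back to @next */
	return rb_entry(node, struct extent_state, rb_node);
}
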
static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *next = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
	if (!ret)
		return next;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in
 * their state field are not merged because the end_io handlers need to be
 * able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally. This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			unsigned *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start) {
		btrfs_err(tree->fs_info,
			"insert state: end < start %llu %llu", end, start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		btrfs_err(tree->fs_info,
		       "found node %llu %llu on insert of %llu %llu",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_split_delalloc_extent(tree->private_data, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

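/*
 * Illustrative sketch (not part of the upstream file): the split-at-start
 * step used by the range operations below. With the tree lock held and a
 * state preallocated by the caller, @state is cut at @start so that
 * @prealloc covers [state->start, start - 1] and @state keeps
 * [start, state->end]. The wrapper name is an assumption for the example.
 */
static __maybe_unused int example_split_at_range_start(
					struct extent_io_tree *tree,
					struct extent_state *state,
					struct extent_state *prealloc,
					u64 start)
{
	lockdep_assert_held(&tree->lock);
	return split_state(tree, state, prealloc, start);
}
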
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_clear_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
	BUG_ON(ret < 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	struct inode *inode = tree->private_data;

	btrfs_panic(btrfs_sb(inode->i_sb), err,
	"locking error: extent tree was modified by another thread while locked");
}

/*
 * clear some bits on a range in the tree. This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;

	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

}

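/*
 * Illustrative sketch (not part of the upstream file): dropping a lock-style
 * bit over a range with __clear_extent_bit, waking any waiters and releasing
 * the reference held by a cached state. EXTENT_LOCKED is just one plausible
 * bit choice; the wrapper name is an assumption for the example.
 */
static __maybe_unused int example_unlock_range(struct extent_io_tree *tree,
					       u64 start, u64 end,
					       struct extent_state **cached)
{
	/* wake == 1 kicks sleepers, delete == 0 keeps other bits intact */
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0,
				  cached, GFP_NOFS, NULL);
}
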
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

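/*
 * Illustrative sketch (not part of the upstream file): blocking until no
 * EXTENT_LOCKED state is left on a range, e.g. before tearing the range
 * down. The bit is an assumption; any state bits can be waited on.
 */
static __maybe_unused void example_wait_unlocked(struct extent_io_tree *tree,
						 u64 start, u64 end)
{
	wait_extent_bit(tree, start, end, EXTENT_LOCKED);
}
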
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned *bits, struct extent_changeset *changeset)
{
	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_set_delalloc_extent(tree->private_data, state, bits);

	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
	BUG_ON(ret < 0);
	state->state |= bits_to_set;
}

static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_LOCKED | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree. This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set. The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive. This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned bits, unsigned exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);

again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		/*
		 * If this extent already has all the bits we want set, then
		 * skip it, not necessary to split it or do anything with it.
		 */
		if ((state->state & bits) == bits) {
			start = state->end + 1;
			cache_state(state, cached_state);
			goto search_again;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with the later
		 * extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

}

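/*
 * Illustrative sketch (not part of the upstream file): taking a lock-style
 * bit with __set_extent_bit. Passing EXTENT_LOCKED as both the bits to set
 * and the exclusive bits makes an overlapping caller fail with -EEXIST and
 * report where the conflict starts; the cleanup of the partially set range
 * and the -EAGAIN return are assumptions made for this example, not the
 * btrfs locking helpers themselves.
 */
static __maybe_unused int example_try_lock_range(struct extent_io_tree *tree,
						 u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS, NULL);
	if (err == -EEXIST) {
		/* Undo the part of the range we did manage to lock. */
		if (failed_start > start)
			__clear_extent_bit(tree, start, failed_start - 1,
					   EXTENT_LOCKED, 1, 0, NULL,
					   GFP_NOFS, NULL);
		return -EAGAIN;
	}
	return err;
}
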
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask, NULL);
}


/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 *			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range. If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits. This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY. This is not meant to be used with
 * boundary bits like LOCK.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
				       clear_bits);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, NULL);
		if (err)
1265 extent_io_tree_panic(tree, err);
Filipe David Borba Mananac42ac0b2013-11-26 15:01:34 +00001266 cache_state(prealloc, cached_state);
1267 prealloc = NULL;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001268 goto out;
1269 }
1270 state = rb_entry(node, struct extent_state, rb_node);
1271hit_next:
1272 last_start = state->start;
1273 last_end = state->end;
1274
1275 /*
1276 * | ---- desired range ---- |
1277 * | state |
1278 *
1279 * Just lock what we found and keep going
1280 */
1281 if (state->start == start && state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001282 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001283 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001284 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001285 if (last_end == (u64)-1)
1286 goto out;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001287 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001288 if (start < end && state && state->start == start &&
1289 !need_resched())
1290 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001291 goto search_again;
1292 }
1293
1294 /*
1295 * | ---- desired range ---- |
1296 * | state |
1297 * or
1298 * | ------------- state -------------- |
1299 *
1300 * We need to split the extent we found, and may flip bits on
1301 * second half.
1302 *
1303 * If the extent we found extends past our
1304 * range, we just split and search again. It'll get split
1305 * again the next time though.
1306 *
1307 * If the extent we found is inside our range, we set the
1308 * desired bit on it.
1309 */
1310 if (state->start < start) {
1311 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001312 if (!prealloc) {
1313 err = -ENOMEM;
1314 goto out;
1315 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001316 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001317 if (err)
1318 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001319 prealloc = NULL;
1320 if (err)
1321 goto out;
1322 if (state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001323 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001324 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001325 state = clear_state_bit(tree, state, &clear_bits, 0,
1326 NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001327 if (last_end == (u64)-1)
1328 goto out;
1329 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001330 if (start < end && state && state->start == start &&
1331 !need_resched())
1332 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001333 }
1334 goto search_again;
1335 }
1336 /*
1337 * | ---- desired range ---- |
1338 * | state | or | state |
1339 *
1340 * There's a hole, we need to insert something in it and
1341 * ignore the extent we found.
1342 */
1343 if (state->start > start) {
1344 u64 this_end;
1345 if (end < last_start)
1346 this_end = end;
1347 else
1348 this_end = last_start - 1;
1349
1350 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001351 if (!prealloc) {
1352 err = -ENOMEM;
1353 goto out;
1354 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001355
1356 /*
1357	 * Avoid freeing 'prealloc' if it can be merged with
1358 * the later extent.
1359 */
1360 err = insert_state(tree, prealloc, start, this_end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001361 NULL, NULL, &bits, NULL);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001362 if (err)
1363 extent_io_tree_panic(tree, err);
Josef Bacike6138872012-09-27 17:07:30 -04001364 cache_state(prealloc, cached_state);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001365 prealloc = NULL;
1366 start = this_end + 1;
1367 goto search_again;
1368 }
1369 /*
1370 * | ---- desired range ---- |
1371 * | state |
1372 * We need to split the extent, and set the bit
1373 * on the first half
1374 */
1375 if (state->start <= end && state->end > end) {
1376 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001377 if (!prealloc) {
1378 err = -ENOMEM;
1379 goto out;
1380 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001381
1382 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001383 if (err)
1384 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001385
Qu Wenruod38ed272015-10-12 14:53:37 +08001386 set_state_bits(tree, prealloc, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001387 cache_state(prealloc, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001388 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001389 prealloc = NULL;
1390 goto out;
1391 }
1392
Josef Bacik462d6fa2011-09-26 13:56:12 -04001393search_again:
1394 if (start > end)
1395 goto out;
1396 spin_unlock(&tree->lock);
David Sterba210aa272016-04-26 23:54:39 +02001397 cond_resched();
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001398 first_iteration = false;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001399 goto again;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001400
1401out:
1402 spin_unlock(&tree->lock);
1403 if (prealloc)
1404 free_extent_state(prealloc);
1405
1406 return err;
1407}
1408
Chris Masond1310b22008-01-24 16:13:08 -05001409/* wrappers around set/clear extent bit */
Qu Wenruod38ed272015-10-12 14:53:37 +08001410int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba2c53b912016-04-26 23:54:39 +02001411 unsigned bits, struct extent_changeset *changeset)
Qu Wenruod38ed272015-10-12 14:53:37 +08001412{
1413 /*
1414 * We don't support EXTENT_LOCKED yet, as current changeset will
1415 * record any bits changed, so for EXTENT_LOCKED case, it will
1416 * either fail with -EEXIST or changeset will record the whole
1417 * range.
1418 */
1419 BUG_ON(bits & EXTENT_LOCKED);
1420
David Sterba2c53b912016-04-26 23:54:39 +02001421 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
Qu Wenruod38ed272015-10-12 14:53:37 +08001422 changeset);
1423}
1424
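/*
 * Variant of set_extent_bit() that uses GFP_NOWAIT for state allocations, so
 * it can be called from contexts that must not sleep.
 */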
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001425int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1426 unsigned bits)
1427{
1428 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1429 GFP_NOWAIT, NULL);
1430}
1431
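/*
 * Wrapper around __clear_extent_bit() that uses GFP_NOFS allocations and does
 * not record the changes in an extent_changeset.
 */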
Qu Wenruofefdc552015-10-12 15:35:38 +08001432int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1433 unsigned bits, int wake, int delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001434 struct extent_state **cached)
Qu Wenruofefdc552015-10-12 15:35:38 +08001435{
1436 return __clear_extent_bit(tree, start, end, bits, wake, delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001437 cached, GFP_NOFS, NULL);
Qu Wenruofefdc552015-10-12 15:35:38 +08001438}
1439
Qu Wenruofefdc552015-10-12 15:35:38 +08001440int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaf734c442016-04-26 23:54:39 +02001441 unsigned bits, struct extent_changeset *changeset)
Qu Wenruofefdc552015-10-12 15:35:38 +08001442{
1443 /*
1444 * Don't support EXTENT_LOCKED case, same reason as
1445 * set_record_extent_bits().
1446 */
1447 BUG_ON(bits & EXTENT_LOCKED);
1448
David Sterbaf734c442016-04-26 23:54:39 +02001449 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
Qu Wenruofefdc552015-10-12 15:35:38 +08001450 changeset);
1451}
1452
Chris Masond352ac62008-09-29 15:18:18 -04001453/*
1454 * either insert or lock state struct between start and end. Use mask to tell
1455 * us if waiting is desired.
1456 */
Chris Mason1edbb732009-09-02 13:24:36 -04001457int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaff13db42015-12-03 14:30:40 +01001458 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001459{
1460 int err;
1461 u64 failed_start;
David Sterba9ee49a042015-01-14 19:52:13 +01001462
Chris Masond1310b22008-01-24 16:13:08 -05001463 while (1) {
David Sterbaff13db42015-12-03 14:30:40 +01001464 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001465 EXTENT_LOCKED, &failed_start,
Qu Wenruod38ed272015-10-12 14:53:37 +08001466 cached_state, GFP_NOFS, NULL);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001467 if (err == -EEXIST) {
Chris Masond1310b22008-01-24 16:13:08 -05001468 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1469 start = failed_start;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001470 } else
Chris Masond1310b22008-01-24 16:13:08 -05001471 break;
Chris Masond1310b22008-01-24 16:13:08 -05001472 WARN_ON(start > end);
1473 }
1474 return err;
1475}
Chris Masond1310b22008-01-24 16:13:08 -05001476
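/*
 * Try to lock the range [start, end] without waiting.  Returns 1 if the lock
 * was taken, or 0 if part of the range is already locked; any portion locked
 * before the conflict is unlocked again before returning.
 */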
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001477int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001478{
1479 int err;
1480 u64 failed_start;
1481
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001482 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
Qu Wenruod38ed272015-10-12 14:53:37 +08001483 &failed_start, NULL, GFP_NOFS, NULL);
Yan Zheng66435582008-10-30 14:19:50 -04001484 if (err == -EEXIST) {
1485 if (failed_start > start)
1486 clear_extent_bit(tree, start, failed_start - 1,
David Sterbaae0f1622017-10-31 16:37:52 +01001487 EXTENT_LOCKED, 1, 0, NULL);
Josef Bacik25179202008-10-29 14:49:05 -04001488 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001489 }
Josef Bacik25179202008-10-29 14:49:05 -04001490 return 1;
1491}
Josef Bacik25179202008-10-29 14:49:05 -04001492
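/*
 * Clear the dirty-for-writeback flag on every page covering the byte range
 * [start, end].  The pages are expected to be present in the page cache.
 */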
David Sterbabd1fa4f2015-12-03 13:08:59 +01001493void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001494{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001495 unsigned long index = start >> PAGE_SHIFT;
1496 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001497 struct page *page;
1498
1499 while (index <= end_index) {
1500 page = find_get_page(inode->i_mapping, index);
1501 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1502 clear_page_dirty_for_io(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001503 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001504 index++;
1505 }
Chris Mason4adaa612013-03-26 13:07:00 -04001506}
1507
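/*
 * Mark every page covering the byte range [start, end] dirty again and update
 * the redirty accounting.
 */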
David Sterbaf6311572015-12-03 13:08:59 +01001508void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001509{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001510 unsigned long index = start >> PAGE_SHIFT;
1511 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001512 struct page *page;
1513
1514 while (index <= end_index) {
1515 page = find_get_page(inode->i_mapping, index);
1516 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Mason4adaa612013-03-26 13:07:00 -04001517 __set_page_dirty_nobuffers(page);
Konstantin Khebnikov8d386332015-02-11 15:26:55 -08001518 account_page_redirty(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001519 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001520 index++;
1521 }
Chris Mason4adaa612013-03-26 13:07:00 -04001522}
1523
Chris Masond352ac62008-09-29 15:18:18 -04001524/* find the first state struct with 'bits' set after 'start', and
1526 * return it. tree->lock must be held. NULL will be returned if
1526 * nothing was found after 'start'
1527 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00001528static struct extent_state *
1529find_first_extent_bit_state(struct extent_io_tree *tree,
David Sterba9ee49a042015-01-14 19:52:13 +01001530 u64 start, unsigned bits)
Chris Masond7fc6402008-02-18 12:12:38 -05001531{
1532 struct rb_node *node;
1533 struct extent_state *state;
1534
1535 /*
1536 * this search will find all the extents that end after
1537 * our range starts.
1538 */
1539 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001540 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001541 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001542
Chris Masond3977122009-01-05 21:25:51 -05001543 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001544 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001545 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001546 return state;
Chris Masond3977122009-01-05 21:25:51 -05001547
Chris Masond7fc6402008-02-18 12:12:38 -05001548 node = rb_next(node);
1549 if (!node)
1550 break;
1551 }
1552out:
1553 return NULL;
1554}
Chris Masond7fc6402008-02-18 12:12:38 -05001555
Chris Masond352ac62008-09-29 15:18:18 -04001556/*
Xiao Guangrong69261c42011-07-14 03:19:45 +00001557 * find the first offset in the io tree with 'bits' set. zero is
1558 * returned if we find something, and *start_ret and *end_ret are
1559 * set to reflect the state struct that was found.
1560 *
Wang Sheng-Hui477d7ea2012-04-06 14:35:47 +08001561 * If nothing was found, 1 is returned; if something was found, 0 is returned.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001562 */
1563int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
David Sterba9ee49a042015-01-14 19:52:13 +01001564 u64 *start_ret, u64 *end_ret, unsigned bits,
Josef Bacike6138872012-09-27 17:07:30 -04001565 struct extent_state **cached_state)
Xiao Guangrong69261c42011-07-14 03:19:45 +00001566{
1567 struct extent_state *state;
1568 int ret = 1;
1569
1570 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001571 if (cached_state && *cached_state) {
1572 state = *cached_state;
Filipe Manana27a35072014-07-06 20:09:59 +01001573 if (state->end == start - 1 && extent_state_in_tree(state)) {
Liu Bo9688e9a2018-08-23 03:14:53 +08001574 while ((state = next_state(state)) != NULL) {
Josef Bacike6138872012-09-27 17:07:30 -04001575 if (state->state & bits)
1576 goto got_it;
Josef Bacike6138872012-09-27 17:07:30 -04001577 }
1578 free_extent_state(*cached_state);
1579 *cached_state = NULL;
1580 goto out;
1581 }
1582 free_extent_state(*cached_state);
1583 *cached_state = NULL;
1584 }
1585
Xiao Guangrong69261c42011-07-14 03:19:45 +00001586 state = find_first_extent_bit_state(tree, start, bits);
Josef Bacike6138872012-09-27 17:07:30 -04001587got_it:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001588 if (state) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +01001589 cache_state_if_flags(state, cached_state, 0);
Xiao Guangrong69261c42011-07-14 03:19:45 +00001590 *start_ret = state->start;
1591 *end_ret = state->end;
1592 ret = 0;
1593 }
Josef Bacike6138872012-09-27 17:07:30 -04001594out:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001595 spin_unlock(&tree->lock);
1596 return ret;
1597}
1598
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001599/**
Josef Bacik41a2ee72020-01-17 09:02:21 -05001600 * find_contiguous_extent_bit: find a contiguous area of bits
1601 * @tree - io tree to check
1602 * @start - offset to start the search from
1603 * @start_ret - the first offset we found with the bits set
1604 * @end_ret - the final contiguous range of the bits that were set
1605 * @bits - bits to look for
1606 *
1607 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1608 * to set bits appropriately, and then merge them again. During this time it
1609 * will drop the tree->lock, so use this helper if you want to find the actual
1610 * contiguous area for the given bits. We will search for the first bit we find, and
1611 * then walk down the tree until we find a non-contiguous area. The area
1612 * returned will be the full contiguous area with the bits set.
1613 */
1614int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1615 u64 *start_ret, u64 *end_ret, unsigned bits)
1616{
1617 struct extent_state *state;
1618 int ret = 1;
1619
1620 spin_lock(&tree->lock);
1621 state = find_first_extent_bit_state(tree, start, bits);
1622 if (state) {
1623 *start_ret = state->start;
1624 *end_ret = state->end;
1625 while ((state = next_state(state)) != NULL) {
1626 if (state->start > (*end_ret + 1))
1627 break;
1628 *end_ret = state->end;
1629 }
1630 ret = 0;
1631 }
1632 spin_unlock(&tree->lock);
1633 return ret;
1634}
1635
1636/**
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001637 * find_first_clear_extent_bit - find the first range that has @bits not set.
1638 * This range could start before @start.
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001639 *
1640 * @tree - the tree to search
1641 * @start - the offset at/after which the found extent should start
1642 * @start_ret - records the beginning of the range
1643 * @end_ret - records the end of the range (inclusive)
1644 * @bits - the set of bits which must be unset
1645 *
1646 * Since an unallocated range is also considered one which doesn't have the bits
1647 * set, it's possible that @end_ret contains -1; this happens when the range
1648 * spans (last_range_end, end of device]. In this case it's up to the caller to
1649 * trim @end_ret to the appropriate size.
1650 */
1651void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1652 u64 *start_ret, u64 *end_ret, unsigned bits)
1653{
1654 struct extent_state *state;
1655 struct rb_node *node, *prev = NULL, *next;
1656
1657 spin_lock(&tree->lock);
1658
1659 /* Find first extent with bits cleared */
1660 while (1) {
1661 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
Nikolay Borisov5750c372020-01-27 11:59:26 +02001662 if (!node && !next && !prev) {
1663 /*
1664 * Tree is completely empty, send full range and let
1665 * caller deal with it
1666 */
1667 *start_ret = 0;
1668 *end_ret = -1;
1669 goto out;
1670 } else if (!node && !next) {
1671 /*
1672 * We are past the last allocated chunk, set start at
1673 * the end of the last extent.
1674 */
1675 state = rb_entry(prev, struct extent_state, rb_node);
1676 *start_ret = state->end + 1;
1677 *end_ret = -1;
1678 goto out;
1679 } else if (!node) {
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001680 node = next;
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001681 }
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001682 /*
1683 * At this point 'node' either contains 'start' or start is
1684 * before 'node'
1685 */
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001686 state = rb_entry(node, struct extent_state, rb_node);
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001687
1688 if (in_range(start, state->start, state->end - state->start + 1)) {
1689 if (state->state & bits) {
1690 /*
1691 * |--range with bits sets--|
1692 * |
1693 * start
1694 */
1695 start = state->end + 1;
1696 } else {
1697 /*
1698 * 'start' falls within a range that doesn't
1699 * have the bits set, so take its start as
1700 * the beginning of the desired range
1701 *
1702 * |--range with bits cleared----|
1703 * |
1704 * start
1705 */
1706 *start_ret = state->start;
1707 break;
1708 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001709 } else {
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001710 /*
1711 * |---prev range---|---hole/unset---|---node range---|
1712 * |
1713 * start
1714 *
1715 * or
1716 *
1717 * |---hole/unset--||--first node--|
1718 * 0 |
1719 * start
1720 */
1721 if (prev) {
1722 state = rb_entry(prev, struct extent_state,
1723 rb_node);
1724 *start_ret = state->end + 1;
1725 } else {
1726 *start_ret = 0;
1727 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001728 break;
1729 }
1730 }
1731
1732 /*
1733 * Find the longest stretch from start until an entry which has the
1734 * bits set
1735 */
1736 while (1) {
1737 state = rb_entry(node, struct extent_state, rb_node);
1738 if (state->end >= start && !(state->state & bits)) {
1739 *end_ret = state->end;
1740 } else {
1741 *end_ret = state->start - 1;
1742 break;
1743 }
1744
1745 node = rb_next(node);
1746 if (!node)
1747 break;
1748 }
1749out:
1750 spin_unlock(&tree->lock);
1751}
1752
Xiao Guangrong69261c42011-07-14 03:19:45 +00001753/*
Chris Masond352ac62008-09-29 15:18:18 -04001754 * find a contiguous range of bytes in the file marked as delalloc, not
1755 * more than 'max_bytes'. start and end are used to return the range.
1756 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001757 * true is returned if we find something, false if nothing was in the tree
Chris Masond352ac62008-09-29 15:18:18 -04001758 */
Josef Bacik083e75e2019-09-23 10:05:20 -04001759bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1760 u64 *end, u64 max_bytes,
1761 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001762{
1763 struct rb_node *node;
1764 struct extent_state *state;
1765 u64 cur_start = *start;
Lu Fengqi3522e902018-11-29 11:33:38 +08001766 bool found = false;
Chris Masond1310b22008-01-24 16:13:08 -05001767 u64 total_bytes = 0;
1768
Chris Masoncad321a2008-12-17 14:51:42 -05001769 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001770
Chris Masond1310b22008-01-24 16:13:08 -05001771 /*
1772 * this search will find all the extents that end after
1773 * our range starts.
1774 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001775 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001776 if (!node) {
Lu Fengqi3522e902018-11-29 11:33:38 +08001777 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001778 goto out;
1779 }
1780
Chris Masond3977122009-01-05 21:25:51 -05001781 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001782 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001783 if (found && (state->start != cur_start ||
1784 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001785 goto out;
1786 }
1787 if (!(state->state & EXTENT_DELALLOC)) {
1788 if (!found)
1789 *end = state->end;
1790 goto out;
1791 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001792 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001793 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001794 *cached_state = state;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +02001795 refcount_inc(&state->refs);
Josef Bacikc2a128d2010-02-02 21:19:11 +00001796 }
Lu Fengqi3522e902018-11-29 11:33:38 +08001797 found = true;
Chris Masond1310b22008-01-24 16:13:08 -05001798 *end = state->end;
1799 cur_start = state->end + 1;
1800 node = rb_next(node);
Chris Masond1310b22008-01-24 16:13:08 -05001801 total_bytes += state->end - state->start + 1;
Josef Bacik7bf811a52013-10-07 22:11:09 -04001802 if (total_bytes >= max_bytes)
Josef Bacik573aeca2013-08-30 14:38:49 -04001803 break;
Josef Bacik573aeca2013-08-30 14:38:49 -04001804 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001805 break;
1806 }
1807out:
Chris Masoncad321a2008-12-17 14:51:42 -05001808 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001809 return found;
1810}
1811
Liu Boda2c7002017-02-10 16:41:05 +01001812static int __process_pages_contig(struct address_space *mapping,
1813 struct page *locked_page,
1814 pgoff_t start_index, pgoff_t end_index,
1815 unsigned long page_ops, pgoff_t *index_ret);
1816
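/*
 * Unlock all pages in the range [start, end] except locked_page, which the
 * caller keeps locked.
 */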
Jeff Mahoney143bede2012-03-01 14:56:26 +01001817static noinline void __unlock_for_delalloc(struct inode *inode,
1818 struct page *locked_page,
1819 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001820{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001821 unsigned long index = start >> PAGE_SHIFT;
1822 unsigned long end_index = end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001823
Liu Bo76c00212017-02-10 16:42:14 +01001824 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001825 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001826 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001827
Liu Bo76c00212017-02-10 16:42:14 +01001828 __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1829 PAGE_UNLOCK, NULL);
Chris Masonc8b97812008-10-29 14:49:59 -04001830}
1831
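/*
 * Lock all pages in the delalloc range except locked_page, which is already
 * locked by the caller.  On -EAGAIN (some pages were missing or no longer
 * dirty) the pages locked so far are unlocked again.
 */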
1832static noinline int lock_delalloc_pages(struct inode *inode,
1833 struct page *locked_page,
1834 u64 delalloc_start,
1835 u64 delalloc_end)
1836{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001837 unsigned long index = delalloc_start >> PAGE_SHIFT;
Liu Bo76c00212017-02-10 16:42:14 +01001838 unsigned long index_ret = index;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001839 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001840 int ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001841
Liu Bo76c00212017-02-10 16:42:14 +01001842 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001843 if (index == locked_page->index && index == end_index)
1844 return 0;
1845
Liu Bo76c00212017-02-10 16:42:14 +01001846 ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1847 end_index, PAGE_LOCK, &index_ret);
1848 if (ret == -EAGAIN)
1849 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1850 (u64)index_ret << PAGE_SHIFT);
Chris Masonc8b97812008-10-29 14:49:59 -04001851 return ret;
1852}
1853
1854/*
Lu Fengqi3522e902018-11-29 11:33:38 +08001855 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1856 * more than @max_bytes. @start and @end are used to return the range.
Chris Masonc8b97812008-10-29 14:49:59 -04001857 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001858 * Return: true if we find something
1859 * false if nothing was in the tree
Chris Masonc8b97812008-10-29 14:49:59 -04001860 */
Johannes Thumshirnce9f9672018-11-19 10:38:17 +01001861EXPORT_FOR_TESTS
Lu Fengqi3522e902018-11-29 11:33:38 +08001862noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
Josef Bacik294e30f2013-10-09 12:00:56 -04001863 struct page *locked_page, u64 *start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03001864 u64 *end)
Chris Masonc8b97812008-10-29 14:49:59 -04001865{
Goldwyn Rodrigues99780592019-06-21 10:02:54 -05001866 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Nikolay Borisov917aace2018-10-26 14:43:20 +03001867 u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001868 u64 delalloc_start;
1869 u64 delalloc_end;
Lu Fengqi3522e902018-11-29 11:33:38 +08001870 bool found;
Chris Mason9655d292009-09-02 15:22:30 -04001871 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001872 int ret;
1873 int loops = 0;
1874
1875again:
1876 /* step one, find a bunch of delalloc bytes starting at start */
1877 delalloc_start = *start;
1878 delalloc_end = 0;
Josef Bacik083e75e2019-09-23 10:05:20 -04001879 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1880 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001881 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001882 *start = delalloc_start;
1883 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001884 free_extent_state(cached_state);
Lu Fengqi3522e902018-11-29 11:33:38 +08001885 return false;
Chris Masonc8b97812008-10-29 14:49:59 -04001886 }
1887
1888 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001889 * start comes from the offset of locked_page. We have to lock
1890 * pages in order, so we can't process delalloc bytes before
1891 * locked_page
1892 */
Chris Masond3977122009-01-05 21:25:51 -05001893 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001894 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001895
1896 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001897 * make sure to limit the number of pages we try to lock down
Chris Masonc8b97812008-10-29 14:49:59 -04001898 */
Josef Bacik7bf811a52013-10-07 22:11:09 -04001899 if (delalloc_end + 1 - delalloc_start > max_bytes)
1900 delalloc_end = delalloc_start + max_bytes - 1;
Chris Masond3977122009-01-05 21:25:51 -05001901
Chris Masonc8b97812008-10-29 14:49:59 -04001902 /* step two, lock all the pages after the page that has start */
1903 ret = lock_delalloc_pages(inode, locked_page,
1904 delalloc_start, delalloc_end);
Nikolay Borisov9bfd61d2018-10-26 14:43:21 +03001905 ASSERT(!ret || ret == -EAGAIN);
Chris Masonc8b97812008-10-29 14:49:59 -04001906 if (ret == -EAGAIN) {
1907 /* some of the pages are gone, lets avoid looping by
1908 * shortening the size of the delalloc range we're searching
1909 */
Chris Mason9655d292009-09-02 15:22:30 -04001910 free_extent_state(cached_state);
Chris Mason7d788742014-05-21 05:49:54 -07001911 cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001912 if (!loops) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001913 max_bytes = PAGE_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001914 loops = 1;
1915 goto again;
1916 } else {
Lu Fengqi3522e902018-11-29 11:33:38 +08001917 found = false;
Chris Masonc8b97812008-10-29 14:49:59 -04001918 goto out_failed;
1919 }
1920 }
Chris Masonc8b97812008-10-29 14:49:59 -04001921
1922 /* step three, lock the state bits for the whole range */
David Sterbaff13db42015-12-03 14:30:40 +01001923 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001924
1925 /* then test to make sure it is all still delalloc */
1926 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001927 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001928 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001929 unlock_extent_cached(tree, delalloc_start, delalloc_end,
David Sterbae43bbe52017-12-12 21:43:52 +01001930 &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001931 __unlock_for_delalloc(inode, locked_page,
1932 delalloc_start, delalloc_end);
1933 cond_resched();
1934 goto again;
1935 }
Chris Mason9655d292009-09-02 15:22:30 -04001936 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001937 *start = delalloc_start;
1938 *end = delalloc_end;
1939out_failed:
1940 return found;
1941}
1942
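/*
 * Walk the pages in [start_index, end_index] and apply the requested page_ops
 * (clear dirty, set/end writeback, set error, lock or unlock), skipping
 * locked_page.  With PAGE_LOCK, a page that cannot be locked (missing, no
 * longer dirty, or remapped) stops the walk; -EAGAIN is returned and
 * *index_ret records the last index that was successfully processed.
 */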
Liu Boda2c7002017-02-10 16:41:05 +01001943static int __process_pages_contig(struct address_space *mapping,
1944 struct page *locked_page,
1945 pgoff_t start_index, pgoff_t end_index,
1946 unsigned long page_ops, pgoff_t *index_ret)
Chris Masonc8b97812008-10-29 14:49:59 -04001947{
Liu Bo873695b2017-02-02 17:49:22 -08001948 unsigned long nr_pages = end_index - start_index + 1;
Liu Boda2c7002017-02-10 16:41:05 +01001949 unsigned long pages_locked = 0;
Liu Bo873695b2017-02-02 17:49:22 -08001950 pgoff_t index = start_index;
Chris Masonc8b97812008-10-29 14:49:59 -04001951 struct page *pages[16];
Liu Bo873695b2017-02-02 17:49:22 -08001952 unsigned ret;
Liu Boda2c7002017-02-10 16:41:05 +01001953 int err = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001954 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001955
Liu Boda2c7002017-02-10 16:41:05 +01001956 if (page_ops & PAGE_LOCK) {
1957 ASSERT(page_ops == PAGE_LOCK);
1958 ASSERT(index_ret && *index_ret == start_index);
1959 }
1960
Filipe Manana704de492014-10-06 22:14:22 +01001961 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
Liu Bo873695b2017-02-02 17:49:22 -08001962 mapping_set_error(mapping, -EIO);
Filipe Manana704de492014-10-06 22:14:22 +01001963
Chris Masond3977122009-01-05 21:25:51 -05001964 while (nr_pages > 0) {
Liu Bo873695b2017-02-02 17:49:22 -08001965 ret = find_get_pages_contig(mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001966 min_t(unsigned long,
1967 nr_pages, ARRAY_SIZE(pages)), pages);
Liu Boda2c7002017-02-10 16:41:05 +01001968 if (ret == 0) {
1969 /*
1970 * Only if we're going to lock these pages,
1971 * can we find nothing at @index.
1972 */
1973 ASSERT(page_ops & PAGE_LOCK);
Liu Bo49d4a332017-03-06 18:20:56 -08001974 err = -EAGAIN;
1975 goto out;
Liu Boda2c7002017-02-10 16:41:05 +01001976 }
Chris Mason8b62b722009-09-02 16:53:46 -04001977
Liu Boda2c7002017-02-10 16:41:05 +01001978 for (i = 0; i < ret; i++) {
Josef Bacikc2790a22013-07-29 11:20:47 -04001979 if (page_ops & PAGE_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001980 SetPagePrivate2(pages[i]);
1981
Chris Mason1d53c9e2019-07-10 12:28:16 -07001982 if (locked_page && pages[i] == locked_page) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001983 put_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001984 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001985 continue;
1986 }
Josef Bacikc2790a22013-07-29 11:20:47 -04001987 if (page_ops & PAGE_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001988 clear_page_dirty_for_io(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001989 if (page_ops & PAGE_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001990 set_page_writeback(pages[i]);
Filipe Manana704de492014-10-06 22:14:22 +01001991 if (page_ops & PAGE_SET_ERROR)
1992 SetPageError(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001993 if (page_ops & PAGE_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001994 end_page_writeback(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001995 if (page_ops & PAGE_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001996 unlock_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001997 if (page_ops & PAGE_LOCK) {
1998 lock_page(pages[i]);
1999 if (!PageDirty(pages[i]) ||
2000 pages[i]->mapping != mapping) {
2001 unlock_page(pages[i]);
2002 put_page(pages[i]);
2003 err = -EAGAIN;
2004 goto out;
2005 }
2006 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002007 put_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01002008 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04002009 }
2010 nr_pages -= ret;
2011 index += ret;
2012 cond_resched();
2013 }
Liu Boda2c7002017-02-10 16:41:05 +01002014out:
2015 if (err && index_ret)
2016 *index_ret = start_index + pages_locked - 1;
2017 return err;
Chris Masonc8b97812008-10-29 14:49:59 -04002018}
Chris Masonc8b97812008-10-29 14:49:59 -04002019
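/*
 * Clear the given bits in the io tree for [start, end] and apply the
 * requested page_ops to all pages covering that range, skipping locked_page.
 */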
Liu Bo873695b2017-02-02 17:49:22 -08002020void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
Nikolay Borisov74e91942019-07-17 16:18:16 +03002021 struct page *locked_page,
2022 unsigned clear_bits,
2023 unsigned long page_ops)
Liu Bo873695b2017-02-02 17:49:22 -08002024{
2025 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
David Sterbaae0f1622017-10-31 16:37:52 +01002026 NULL);
Liu Bo873695b2017-02-02 17:49:22 -08002027
2028 __process_pages_contig(inode->i_mapping, locked_page,
2029 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
Liu Boda2c7002017-02-10 16:41:05 +01002030 page_ops, NULL);
Liu Bo873695b2017-02-02 17:49:22 -08002031}
2032
Chris Masond352ac62008-09-29 15:18:18 -04002033/*
2034 * count the number of bytes in the tree that have a given bit(s)
2035 * set. This can be fairly slow, except for EXTENT_DIRTY which is
2036 * cached. The total number found is returned.
2037 */
Chris Masond1310b22008-01-24 16:13:08 -05002038u64 count_range_bits(struct extent_io_tree *tree,
2039 u64 *start, u64 search_end, u64 max_bytes,
David Sterba9ee49a042015-01-14 19:52:13 +01002040 unsigned bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05002041{
2042 struct rb_node *node;
2043 struct extent_state *state;
2044 u64 cur_start = *start;
2045 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05002046 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002047 int found = 0;
2048
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05302049 if (WARN_ON(search_end <= cur_start))
Chris Masond1310b22008-01-24 16:13:08 -05002050 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05002051
Chris Masoncad321a2008-12-17 14:51:42 -05002052 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002053 if (cur_start == 0 && bits == EXTENT_DIRTY) {
2054 total_bytes = tree->dirty_bytes;
2055 goto out;
2056 }
2057 /*
2058 * this search will find all the extents that end after
2059 * our range starts.
2060 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002061 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05002062 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05002063 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05002064
Chris Masond3977122009-01-05 21:25:51 -05002065 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05002066 state = rb_entry(node, struct extent_state, rb_node);
2067 if (state->start > search_end)
2068 break;
Chris Masonec29ed52011-02-23 16:23:20 -05002069 if (contig && found && state->start > last + 1)
2070 break;
2071 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05002072 total_bytes += min(search_end, state->end) + 1 -
2073 max(cur_start, state->start);
2074 if (total_bytes >= max_bytes)
2075 break;
2076 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04002077 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05002078 found = 1;
2079 }
Chris Masonec29ed52011-02-23 16:23:20 -05002080 last = state->end;
2081 } else if (contig && found) {
2082 break;
Chris Masond1310b22008-01-24 16:13:08 -05002083 }
2084 node = rb_next(node);
2085 if (!node)
2086 break;
2087 }
2088out:
Chris Masoncad321a2008-12-17 14:51:42 -05002089 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002090 return total_bytes;
2091}
Christoph Hellwigb2950862008-12-02 09:54:17 -05002092
Chris Masond352ac62008-09-29 15:18:18 -04002093/*
2094 * set the failrec field for a given byte offset in the tree. If there isn't
2095 * an extent_state there already, this does nothing and returns -ENOENT.
2096 */
Josef Bacikb3f167a2019-09-23 10:05:21 -04002097int set_state_failrec(struct extent_io_tree *tree, u64 start,
2098 struct io_failure_record *failrec)
Chris Masond1310b22008-01-24 16:13:08 -05002099{
2100 struct rb_node *node;
2101 struct extent_state *state;
2102 int ret = 0;
2103
Chris Masoncad321a2008-12-17 14:51:42 -05002104 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002105 /*
2106 * this search will find all the extents that end after
2107 * our range starts.
2108 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002109 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002110 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05002111 ret = -ENOENT;
2112 goto out;
2113 }
2114 state = rb_entry(node, struct extent_state, rb_node);
2115 if (state->start != start) {
2116 ret = -ENOENT;
2117 goto out;
2118 }
David Sterba47dc1962016-02-11 13:24:13 +01002119 state->failrec = failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002120out:
Chris Masoncad321a2008-12-17 14:51:42 -05002121 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002122 return ret;
2123}
2124
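/*
 * Look up the io_failure_record stored at the exact byte offset 'start'.
 * Returns -ENOENT if no extent_state starts at that offset.
 */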
Josef Bacikb3f167a2019-09-23 10:05:21 -04002125int get_state_failrec(struct extent_io_tree *tree, u64 start,
2126 struct io_failure_record **failrec)
Chris Masond1310b22008-01-24 16:13:08 -05002127{
2128 struct rb_node *node;
2129 struct extent_state *state;
2130 int ret = 0;
2131
Chris Masoncad321a2008-12-17 14:51:42 -05002132 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002133 /*
2134 * this search will find all the extents that end after
2135 * our range starts.
2136 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002137 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002138 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05002139 ret = -ENOENT;
2140 goto out;
2141 }
2142 state = rb_entry(node, struct extent_state, rb_node);
2143 if (state->start != start) {
2144 ret = -ENOENT;
2145 goto out;
2146 }
David Sterba47dc1962016-02-11 13:24:13 +01002147 *failrec = state->failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002148out:
Chris Masoncad321a2008-12-17 14:51:42 -05002149 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002150 return ret;
2151}
2152
2153/*
2154 * searches a range in the state tree for the given bits.
Chris Mason70dec802008-01-29 09:59:12 -05002155 * If 'filled' == 1, this returns 1 only if every extent in the range
Chris Masond1310b22008-01-24 16:13:08 -05002156 * has the bits set. Otherwise, 1 is returned if any bit in the
2157 * range is found set.
2158 */
2159int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba9ee49a042015-01-14 19:52:13 +01002160 unsigned bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05002161{
2162 struct extent_state *state = NULL;
2163 struct rb_node *node;
2164 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002165
Chris Masoncad321a2008-12-17 14:51:42 -05002166 spin_lock(&tree->lock);
Filipe Manana27a35072014-07-06 20:09:59 +01002167 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
Josef Bacikdf98b6e2011-06-20 14:53:48 -04002168 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04002169 node = &cached->rb_node;
2170 else
2171 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05002172 while (node && start <= end) {
2173 state = rb_entry(node, struct extent_state, rb_node);
2174
2175 if (filled && state->start > start) {
2176 bitset = 0;
2177 break;
2178 }
2179
2180 if (state->start > end)
2181 break;
2182
2183 if (state->state & bits) {
2184 bitset = 1;
2185 if (!filled)
2186 break;
2187 } else if (filled) {
2188 bitset = 0;
2189 break;
2190 }
Chris Mason46562ce2009-09-23 20:23:16 -04002191
2192 if (state->end == (u64)-1)
2193 break;
2194
Chris Masond1310b22008-01-24 16:13:08 -05002195 start = state->end + 1;
2196 if (start > end)
2197 break;
2198 node = rb_next(node);
2199 if (!node) {
2200 if (filled)
2201 bitset = 0;
2202 break;
2203 }
2204 }
Chris Masoncad321a2008-12-17 14:51:42 -05002205 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002206 return bitset;
2207}
Chris Masond1310b22008-01-24 16:13:08 -05002208
2209/*
2210 * helper function to set a given page up to date if all the
2211 * extents in the tree for that page are up to date
2212 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01002213static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05002214{
Miao Xie4eee4fa2012-12-21 09:17:45 +00002215 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002216 u64 end = start + PAGE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04002217 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05002218 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002219}
2220
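/*
 * Drop an io_failure_record: clear its marker bits from the failure tree and
 * the io tree, then free the record itself.
 */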
Josef Bacik7870d082017-05-05 11:57:15 -04002221int free_io_failure(struct extent_io_tree *failure_tree,
2222 struct extent_io_tree *io_tree,
2223 struct io_failure_record *rec)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002224{
2225 int ret;
2226 int err = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002227
David Sterba47dc1962016-02-11 13:24:13 +01002228 set_state_failrec(failure_tree, rec->start, NULL);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002229 ret = clear_extent_bits(failure_tree, rec->start,
2230 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002231 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002232 if (ret)
2233 err = ret;
2234
Josef Bacik7870d082017-05-05 11:57:15 -04002235 ret = clear_extent_bits(io_tree, rec->start,
David Woodhouse53b381b2013-01-29 18:40:14 -05002236 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002237 EXTENT_DAMAGED);
David Woodhouse53b381b2013-01-29 18:40:14 -05002238 if (ret && !err)
2239 err = ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002240
2241 kfree(rec);
2242 return err;
2243}
2244
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002245/*
2246 * this bypasses the standard btrfs submit functions deliberately, as
2247 * the standard behavior is to write all copies in a raid setup. here we only
2248 * want to write the one bad copy. so we do the mapping for ourselves and issue
2249 * submit_bio directly.
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002250 * to avoid any synchronization issues, wait for the data after writing, which
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002251 * actually prevents the read that triggered the error from finishing.
2252 * currently, there can be no more than two copies of every data bit. thus,
2253 * exactly one rewrite is required.
2254 */
Josef Bacik6ec656b2017-05-05 11:57:14 -04002255int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2256 u64 length, u64 logical, struct page *page,
2257 unsigned int pg_offset, int mirror_num)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002258{
2259 struct bio *bio;
2260 struct btrfs_device *dev;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002261 u64 map_length = 0;
2262 u64 sector;
2263 struct btrfs_bio *bbio = NULL;
2264 int ret;
2265
Linus Torvalds1751e8a2017-11-27 13:05:09 -08002266 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002267 BUG_ON(!mirror_num);
2268
David Sterbac5e4c3d2017-06-12 17:29:41 +02002269 bio = btrfs_io_bio_alloc(1);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002270 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002271 map_length = length;
2272
Filipe Mananab5de8d02016-05-27 22:21:27 +01002273 /*
2274 * Avoid races with device replace and make sure our bbio has devices
2275 * associated to its stripes that don't go away while we are doing the
2276 * read repair operation.
2277 */
2278 btrfs_bio_counter_inc_blocked(fs_info);
Nikolay Borisove4ff5fb2017-07-19 10:48:42 +03002279 if (btrfs_is_parity_mirror(fs_info, logical, length)) {
Liu Boc7253282017-03-29 10:53:58 -07002280 /*
2281 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2282 * to update all raid stripes, but here we just want to correct
2283 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2284 * stripe's dev and sector.
2285 */
2286 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2287 &map_length, &bbio, 0);
2288 if (ret) {
2289 btrfs_bio_counter_dec(fs_info);
2290 bio_put(bio);
2291 return -EIO;
2292 }
2293 ASSERT(bbio->mirror_num == 1);
2294 } else {
2295 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2296 &map_length, &bbio, mirror_num);
2297 if (ret) {
2298 btrfs_bio_counter_dec(fs_info);
2299 bio_put(bio);
2300 return -EIO;
2301 }
2302 BUG_ON(mirror_num != bbio->mirror_num);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002303 }
Liu Boc7253282017-03-29 10:53:58 -07002304
2305 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002306 bio->bi_iter.bi_sector = sector;
Liu Boc7253282017-03-29 10:53:58 -07002307 dev = bbio->stripes[bbio->mirror_num - 1].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08002308 btrfs_put_bbio(bbio);
Anand Jainebbede42017-12-04 12:54:52 +08002309 if (!dev || !dev->bdev ||
2310 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
Filipe Mananab5de8d02016-05-27 22:21:27 +01002311 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002312 bio_put(bio);
2313 return -EIO;
2314 }
Christoph Hellwig74d46992017-08-23 19:10:32 +02002315 bio_set_dev(bio, dev->bdev);
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002316 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
Miao Xieffdd2012014-09-12 18:44:00 +08002317 bio_add_page(bio, page, length, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002318
Mike Christie4e49ea42016-06-05 14:31:41 -05002319 if (btrfsic_submit_bio_wait(bio)) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002320 /* try to remap that extent elsewhere? */
Filipe Mananab5de8d02016-05-27 22:21:27 +01002321 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002322 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002323 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002324 return -EIO;
2325 }
2326
David Sterbab14af3b2015-10-08 10:43:10 +02002327 btrfs_info_rl_in_rcu(fs_info,
2328 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
Josef Bacik6ec656b2017-05-05 11:57:14 -04002329 ino, start,
Miao Xie1203b682014-09-12 18:44:01 +08002330 rcu_str_deref(dev->name), sector);
Filipe Mananab5de8d02016-05-27 22:21:27 +01002331 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002332 bio_put(bio);
2333 return 0;
2334}
2335
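/*
 * Rewrite each page of a metadata extent buffer from the copy we hold, using
 * repair_io_failure() against the given mirror.  Refused on a read-only
 * filesystem.
 */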
David Sterba20a1fbf92019-03-20 11:23:44 +01002336int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num)
Josef Bacikea466792012-03-26 21:57:36 -04002337{
David Sterba20a1fbf92019-03-20 11:23:44 +01002338 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacikea466792012-03-26 21:57:36 -04002339 u64 start = eb->start;
David Sterbacc5e31a2018-03-01 18:20:27 +01002340 int i, num_pages = num_extent_pages(eb);
Chris Masond95603b2012-04-12 15:55:15 -04002341 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04002342
David Howellsbc98a422017-07-17 08:45:34 +01002343 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002344 return -EROFS;
2345
Josef Bacikea466792012-03-26 21:57:36 -04002346 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02002347 struct page *p = eb->pages[i];
Miao Xie1203b682014-09-12 18:44:01 +08002348
Josef Bacik6ec656b2017-05-05 11:57:14 -04002349 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
Miao Xie1203b682014-09-12 18:44:01 +08002350 start - page_offset(p), mirror_num);
Josef Bacikea466792012-03-26 21:57:36 -04002351 if (ret)
2352 break;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002353 start += PAGE_SIZE;
Josef Bacikea466792012-03-26 21:57:36 -04002354 }
2355
2356 return ret;
2357}
2358
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002359/*
2360 * each time an IO finishes, we do a fast check in the IO failure tree
2361 * to see if we need to process or clean up an io_failure_record
2362 */
Josef Bacik7870d082017-05-05 11:57:15 -04002363int clean_io_failure(struct btrfs_fs_info *fs_info,
2364 struct extent_io_tree *failure_tree,
2365 struct extent_io_tree *io_tree, u64 start,
2366 struct page *page, u64 ino, unsigned int pg_offset)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002367{
2368 u64 private;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002369 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002370 struct extent_state *state;
2371 int num_copies;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002372 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002373
2374 private = 0;
Josef Bacik7870d082017-05-05 11:57:15 -04002375 ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2376 EXTENT_DIRTY, 0);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002377 if (!ret)
2378 return 0;
2379
Josef Bacik7870d082017-05-05 11:57:15 -04002380 ret = get_state_failrec(failure_tree, start, &failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002381 if (ret)
2382 return 0;
2383
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002384 BUG_ON(!failrec->this_mirror);
2385
2386 if (failrec->in_validation) {
2387 /* there was no real error, just free the record */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002388 btrfs_debug(fs_info,
2389 "clean_io_failure: freeing dummy error at %llu",
2390 failrec->start);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002391 goto out;
2392 }
David Howellsbc98a422017-07-17 08:45:34 +01002393 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002394 goto out;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002395
Josef Bacik7870d082017-05-05 11:57:15 -04002396 spin_lock(&io_tree->lock);
2397 state = find_first_extent_bit_state(io_tree,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002398 failrec->start,
2399 EXTENT_LOCKED);
Josef Bacik7870d082017-05-05 11:57:15 -04002400 spin_unlock(&io_tree->lock);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002401
Miao Xie883d0de2013-07-25 19:22:35 +08002402 if (state && state->start <= failrec->start &&
2403 state->end >= failrec->start + failrec->len - 1) {
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002404 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2405 failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002406 if (num_copies > 1) {
Josef Bacik7870d082017-05-05 11:57:15 -04002407 repair_io_failure(fs_info, ino, start, failrec->len,
2408 failrec->logical, page, pg_offset,
2409 failrec->failed_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002410 }
2411 }
2412
2413out:
Josef Bacik7870d082017-05-05 11:57:15 -04002414 free_io_failure(failure_tree, io_tree, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002415
Miao Xie454ff3d2014-09-12 18:43:58 +08002416 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002417}
2418
Miao Xief6124962014-09-12 18:44:04 +08002419/*
2420 * Can be called when
2421 * - holding the extent lock
2422 * - under an ordered extent
2423 * - the inode is being freed
2424 */
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002425void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
Miao Xief6124962014-09-12 18:44:04 +08002426{
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002427 struct extent_io_tree *failure_tree = &inode->io_failure_tree;
Miao Xief6124962014-09-12 18:44:04 +08002428 struct io_failure_record *failrec;
2429 struct extent_state *state, *next;
2430
2431 if (RB_EMPTY_ROOT(&failure_tree->state))
2432 return;
2433
2434 spin_lock(&failure_tree->lock);
2435 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2436 while (state) {
2437 if (state->start > end)
2438 break;
2439
2440 ASSERT(state->end <= end);
2441
2442 next = next_state(state);
2443
David Sterba47dc1962016-02-11 13:24:13 +01002444 failrec = state->failrec;
Miao Xief6124962014-09-12 18:44:04 +08002445 free_extent_state(state);
2446 kfree(failrec);
2447
2448 state = next;
2449 }
2450 spin_unlock(&failure_tree->lock);
2451}
2452
Miao Xie2fe63032014-09-12 18:43:59 +08002453int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
David Sterba47dc1962016-02-11 13:24:13 +01002454 struct io_failure_record **failrec_ret)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002455{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002456 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002457 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002458 struct extent_map *em;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002459 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2460 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2461 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002462 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002463 u64 logical;
2464
David Sterba47dc1962016-02-11 13:24:13 +01002465 ret = get_state_failrec(failure_tree, start, &failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002466 if (ret) {
2467 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2468 if (!failrec)
2469 return -ENOMEM;
Miao Xie2fe63032014-09-12 18:43:59 +08002470
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002471 failrec->start = start;
2472 failrec->len = end - start + 1;
2473 failrec->this_mirror = 0;
2474 failrec->bio_flags = 0;
2475 failrec->in_validation = 0;
2476
2477 read_lock(&em_tree->lock);
2478 em = lookup_extent_mapping(em_tree, start, failrec->len);
2479 if (!em) {
2480 read_unlock(&em_tree->lock);
2481 kfree(failrec);
2482 return -EIO;
2483 }
2484
Filipe David Borba Manana68ba9902013-11-25 03:22:07 +00002485 if (em->start > start || em->start + em->len <= start) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002486 free_extent_map(em);
2487 em = NULL;
2488 }
2489 read_unlock(&em_tree->lock);
Tsutomu Itoh7a2d6a62012-10-01 03:07:15 -06002490 if (!em) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002491 kfree(failrec);
2492 return -EIO;
2493 }
Miao Xie2fe63032014-09-12 18:43:59 +08002494
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002495 logical = start - em->start;
2496 logical = em->block_start + logical;
2497 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2498 logical = em->block_start;
2499 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2500 extent_set_compress_type(&failrec->bio_flags,
2501 em->compress_type);
2502 }
Miao Xie2fe63032014-09-12 18:43:59 +08002503
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002504 btrfs_debug(fs_info,
2505 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2506 logical, start, failrec->len);
Miao Xie2fe63032014-09-12 18:43:59 +08002507
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002508 failrec->logical = logical;
2509 free_extent_map(em);
2510
2511 /* set the bits in the private failure tree */
2512 ret = set_extent_bits(failure_tree, start, end,
David Sterbaceeb0ae2016-04-26 23:54:39 +02002513 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002514 if (ret >= 0)
David Sterba47dc1962016-02-11 13:24:13 +01002515 ret = set_state_failrec(failure_tree, start, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002516 /* set the bits in the inode's tree */
2517 if (ret >= 0)
David Sterbaceeb0ae2016-04-26 23:54:39 +02002518 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002519 if (ret < 0) {
2520 kfree(failrec);
2521 return ret;
2522 }
2523 } else {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002524 btrfs_debug(fs_info,
2525 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2526 failrec->logical, failrec->start, failrec->len,
2527 failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002528 /*
2529 * when data can be on disk in more than two copies, add to failrec here
2530 * (e.g. with a list for failed_mirror) to make
2531 * clean_io_failure() clean all those errors at once.
2532 */
2533 }
Miao Xie2fe63032014-09-12 18:43:59 +08002534
2535 *failrec_ret = failrec;
2536
2537 return 0;
2538}
2539
Omar Sandovalc7333972020-04-16 14:46:14 -07002540bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
2541 struct io_failure_record *failrec,
2542 int failed_mirror)
Miao Xie2fe63032014-09-12 18:43:59 +08002543{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002544 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002545 int num_copies;
2546
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002547 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002548 if (num_copies == 1) {
2549 /*
2550 * we only have a single copy of the data, so don't bother with
2551 * all the retry and error correction code that follows. no
2552 * matter what the error is, it is very likely to persist.
2553 */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002554 btrfs_debug(fs_info,
2555 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2556 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002557 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002558 }
2559
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002560 /*
2561 * there are two premises:
2562 * a) deliver good data to the caller
2563 * b) correct the bad sectors on disk
2564 */
Omar Sandovalc7333972020-04-16 14:46:14 -07002565 if (needs_validation) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002566 /*
2567 * to fulfill b), we need to know the exact failing sectors, as
2568 * we don't want to rewrite any more than the failed ones. thus,
2569 * we need separate read requests for the failed bio
2570 *
2571 * if the following BUG_ON triggers, our validation request got
2572 * merged. we need separate requests for our algorithm to work.
2573 */
2574 BUG_ON(failrec->in_validation);
2575 failrec->in_validation = 1;
2576 failrec->this_mirror = failed_mirror;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002577 } else {
2578 /*
2579 * we're ready to fulfill a) and b) together. get a good copy
2580 * of the failed sector and, if we succeed, we have set up
2581 * everything for repair_io_failure to do the rest for us.
2582 */
2583 if (failrec->in_validation) {
2584 BUG_ON(failrec->this_mirror != failed_mirror);
2585 failrec->in_validation = 0;
2586 failrec->this_mirror = 0;
2587 }
2588 failrec->failed_mirror = failed_mirror;
2589 failrec->this_mirror++;
2590 if (failrec->this_mirror == failed_mirror)
2591 failrec->this_mirror++;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002592 }
2593
Miao Xiefacc8a222013-07-25 19:22:34 +08002594 if (failrec->this_mirror > num_copies) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002595 btrfs_debug(fs_info,
2596 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2597 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002598 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002599 }
2600
Liu Boc3cfb652017-07-13 15:00:50 -07002601 return true;
Miao Xie2fe63032014-09-12 18:43:59 +08002602}
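/*
 * Illustrative walk-through (added, not part of the original source): with
 * num_copies == 3 and a failed read of mirror 1, the non-validation branch
 * above bumps this_mirror to 1, sees it equals failed_mirror and bumps it to
 * 2, so the first repair read targets mirror 2.  If that also fails, the next
 * call advances this_mirror to 3; once this_mirror exceeds num_copies the
 * function returns false and the error is reported to the caller.
 */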
2603
2604
2605struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2606 struct io_failure_record *failrec,
2607 struct page *page, int pg_offset, int icsum,
Miao Xie8b110e32014-09-12 18:44:03 +08002608 bio_end_io_t *endio_func, void *data)
Miao Xie2fe63032014-09-12 18:43:59 +08002609{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002610 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002611 struct bio *bio;
2612 struct btrfs_io_bio *btrfs_failed_bio;
2613 struct btrfs_io_bio *btrfs_bio;
2614
David Sterbac5e4c3d2017-06-12 17:29:41 +02002615 bio = btrfs_io_bio_alloc(1);
Miao Xie2fe63032014-09-12 18:43:59 +08002616 bio->bi_end_io = endio_func;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002617 bio->bi_iter.bi_sector = failrec->logical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002618 bio->bi_iter.bi_size = 0;
Miao Xie8b110e32014-09-12 18:44:03 +08002619 bio->bi_private = data;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002620
Miao Xiefacc8a222013-07-25 19:22:34 +08002621 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2622 if (btrfs_failed_bio->csum) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002623 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2624
2625 btrfs_bio = btrfs_io_bio(bio);
2626 btrfs_bio->csum = btrfs_bio->csum_inline;
Miao Xie2fe63032014-09-12 18:43:59 +08002627 icsum *= csum_size;
2628 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
Miao Xiefacc8a222013-07-25 19:22:34 +08002629 csum_size);
2630 }
2631
Miao Xie2fe63032014-09-12 18:43:59 +08002632 bio_add_page(bio, page, failrec->len, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002633
Miao Xie2fe63032014-09-12 18:43:59 +08002634 return bio;
2635}
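/*
 * Editorial note (added): 'icsum' is the index of the failed sector within
 * the original bio, so multiplying it by csum_size selects exactly that
 * sector's checksum from the failed bio's csum array and copies it into the
 * single-vector repair bio built above.
 */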
2636
Omar Sandovalc7333972020-04-16 14:46:14 -07002637static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
2638{
2639 struct bio_vec *bvec;
2640 u64 len = 0;
2641 int i;
2642
2643 /*
2644 * We need to validate each sector individually if the failed I/O was
2645 * for multiple sectors.
2646 */
2647 bio_for_each_bvec_all(bvec, bio, i) {
2648 len += bvec->bv_len;
2649 if (len > inode->i_sb->s_blocksize)
2650 return true;
2651 }
2652 return false;
2653}
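/*
 * Illustrative example (added, assuming a 4K block size): a failed 16K read
 * covers four sectors, so the accumulated bvec length above exceeds
 * s_blocksize and per-sector validation is required; a single 4K read never
 * exceeds the block size and can simply be retried as a whole.
 */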
2654
Miao Xie2fe63032014-09-12 18:43:59 +08002655/*
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002656 * This is a generic handler for readpage errors. If other copies exist, read
2657 * those and write back good data to the failed position. It does not attempt
2658 * to remap the failed extent elsewhere, hoping the device will be smart
2659 * enough to do this as needed.
Miao Xie2fe63032014-09-12 18:43:59 +08002660 */
Miao Xie2fe63032014-09-12 18:43:59 +08002661static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2662 struct page *page, u64 start, u64 end,
2663 int failed_mirror)
2664{
2665 struct io_failure_record *failrec;
2666 struct inode *inode = page->mapping->host;
2667 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002668 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
Omar Sandovalc7333972020-04-16 14:46:14 -07002669 bool need_validation;
Miao Xie2fe63032014-09-12 18:43:59 +08002670 struct bio *bio;
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002671 int read_mode = 0;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002672 blk_status_t status;
Miao Xie2fe63032014-09-12 18:43:59 +08002673 int ret;
2674
Mike Christie1f7ad752016-06-05 14:31:51 -05002675 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
Miao Xie2fe63032014-09-12 18:43:59 +08002676
2677 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2678 if (ret)
2679 return ret;
2680
Omar Sandovalc7333972020-04-16 14:46:14 -07002681 need_validation = btrfs_io_needs_validation(inode, failed_bio);
2682
2683 if (!btrfs_check_repairable(inode, need_validation, failrec,
Liu Boc3cfb652017-07-13 15:00:50 -07002684 failed_mirror)) {
Josef Bacik7870d082017-05-05 11:57:15 -04002685 free_io_failure(failure_tree, tree, failrec);
Miao Xie2fe63032014-09-12 18:43:59 +08002686 return -EIO;
2687 }
2688
Omar Sandovalc7333972020-04-16 14:46:14 -07002689 if (need_validation)
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002690 read_mode |= REQ_FAILFAST_DEV;
Miao Xie2fe63032014-09-12 18:43:59 +08002691
2692 phy_offset >>= inode->i_sb->s_blocksize_bits;
2693 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2694 start - page_offset(page),
Miao Xie8b110e32014-09-12 18:44:03 +08002695 (int)phy_offset, failed_bio->bi_end_io,
2696 NULL);
David Sterbaebcc3262018-06-29 10:56:53 +02002697 bio->bi_opf = REQ_OP_READ | read_mode;
Miao Xie2fe63032014-09-12 18:43:59 +08002698
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002699 btrfs_debug(btrfs_sb(inode->i_sb),
2700 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2701 read_mode, failrec->this_mirror, failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002702
Linus Torvalds8c27cb32017-07-05 16:41:23 -07002703 status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
Nikolay Borisov50489a52019-04-10 19:46:04 +03002704 failrec->bio_flags);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002705 if (status) {
Josef Bacik7870d082017-05-05 11:57:15 -04002706 free_io_failure(failure_tree, tree, failrec);
Miao Xie6c387ab2014-09-12 18:43:57 +08002707 bio_put(bio);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002708 ret = blk_status_to_errno(status);
Miao Xie6c387ab2014-09-12 18:43:57 +08002709 }
2710
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002711 return ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002712}
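/*
 * Editorial note (added): when validation is needed the repair read is tagged
 * REQ_FAILFAST_DEV, asking the lower layers not to retry a failing device on
 * their own; a bad sector then fails quickly and btrfs_check_repairable() can
 * move on to the next mirror instead of stalling on driver-level retries.
 */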
2713
Chris Masond1310b22008-01-24 16:13:08 -05002714/* lots and lots of room for performance fixes in the end_bio funcs */
2715
David Sterbab5227c02015-12-03 13:08:59 +01002716void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
Jeff Mahoney87826df2012-02-15 16:23:57 +01002717{
2718 int uptodate = (err == 0);
Eric Sandeen3e2426b2014-06-12 00:39:58 -05002719 int ret = 0;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002720
Nikolay Borisovc6297322018-11-08 10:18:08 +02002721 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002722
Jeff Mahoney87826df2012-02-15 16:23:57 +01002723 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002724 ClearPageUptodate(page);
2725 SetPageError(page);
Colin Ian Kingbff5baf2017-05-09 18:14:01 +01002726 ret = err < 0 ? err : -EIO;
Liu Bo5dca6ee2014-05-12 12:47:36 +08002727 mapping_set_error(page->mapping, ret);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002728 }
Jeff Mahoney87826df2012-02-15 16:23:57 +01002729}
2730
Chris Masond1310b22008-01-24 16:13:08 -05002731/*
2732 * after a writepage IO is done, we need to:
2733 * clear the uptodate bits on error
2734 * clear the writeback bits in the extent tree for this IO
2735 * end_page_writeback if the page has no more pending IO
2736 *
2737 * Scheduling is not allowed, so the extent state tree is expected
2738 * to have one and only one object corresponding to this IO.
2739 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002740static void end_bio_extent_writepage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002741{
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002742 int error = blk_status_to_errno(bio->bi_status);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002743 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002744 u64 start;
2745 u64 end;
Ming Lei6dc4f102019-02-15 19:13:19 +08002746 struct bvec_iter_all iter_all;
Chris Masond1310b22008-01-24 16:13:08 -05002747
David Sterbac09abff2017-07-13 18:10:07 +02002748 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002749 bio_for_each_segment_all(bvec, bio, iter_all) {
Chris Masond1310b22008-01-24 16:13:08 -05002750 struct page *page = bvec->bv_page;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002751 struct inode *inode = page->mapping->host;
2752 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
David Woodhouse902b22f2008-08-20 08:51:49 -04002753
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002754 /* We always issue full-page reads, but if some block
2755 * in a page fails to read, blk_update_request() will
2756 * advance bv_offset and adjust bv_len to compensate.
2757 * Print a warning for nonzero offsets, and an error
2758 * if they don't add up to a full page. */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002759 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2760 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002761 btrfs_err(fs_info,
Frank Holtonefe120a2013-12-20 11:37:06 -05002762 "partial page write in btrfs with offset %u and length %u",
2763 bvec->bv_offset, bvec->bv_len);
2764 else
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002765 btrfs_info(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04002766 "incomplete page write in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002767 bvec->bv_offset, bvec->bv_len);
2768 }
Chris Masond1310b22008-01-24 16:13:08 -05002769
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002770 start = page_offset(page);
2771 end = start + bvec->bv_offset + bvec->bv_len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002772
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002773 end_extent_writepage(page, error, start, end);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002774 end_page_writeback(page);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002775 }
Chris Mason2b1f55b2008-09-24 11:48:04 -04002776
Chris Masond1310b22008-01-24 16:13:08 -05002777 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002778}
2779
Miao Xie883d0de2013-07-25 19:22:35 +08002780static void
2781endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2782 int uptodate)
2783{
2784 struct extent_state *cached = NULL;
2785 u64 end = start + len - 1;
2786
2787 if (uptodate && tree->track_uptodate)
2788 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
David Sterbad810a4b2017-12-07 18:52:54 +01002789 unlock_extent_cached_atomic(tree, start, end, &cached);
Miao Xie883d0de2013-07-25 19:22:35 +08002790}
2791
Chris Masond1310b22008-01-24 16:13:08 -05002792/*
2793 * after a readpage IO is done, we need to:
2794 * clear the uptodate bits on error
2795 * set the uptodate bits if things worked
2796 * set the page up to date if all extents in the tree are uptodate
2797 * clear the lock bit in the extent tree
2798 * unlock the page if there are no other extents locked for it
2799 *
2800 * Scheduling is not allowed, so the extent state tree is expected
2801 * to have one and only one object corresponding to this IO.
2802 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002803static void end_bio_extent_readpage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002804{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002805 struct bio_vec *bvec;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002806 int uptodate = !bio->bi_status;
Miao Xiefacc8a222013-07-25 19:22:34 +08002807 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
Josef Bacik7870d082017-05-05 11:57:15 -04002808 struct extent_io_tree *tree, *failure_tree;
Miao Xiefacc8a222013-07-25 19:22:34 +08002809 u64 offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002810 u64 start;
2811 u64 end;
Miao Xiefacc8a222013-07-25 19:22:34 +08002812 u64 len;
Miao Xie883d0de2013-07-25 19:22:35 +08002813 u64 extent_start = 0;
2814 u64 extent_len = 0;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002815 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002816 int ret;
Ming Lei6dc4f102019-02-15 19:13:19 +08002817 struct bvec_iter_all iter_all;
Chris Masond1310b22008-01-24 16:13:08 -05002818
David Sterbac09abff2017-07-13 18:10:07 +02002819 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002820 bio_for_each_segment_all(bvec, bio, iter_all) {
Chris Masond1310b22008-01-24 16:13:08 -05002821 struct page *page = bvec->bv_page;
Josef Bacika71754f2013-06-17 17:14:39 -04002822 struct inode *inode = page->mapping->host;
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002823 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002824 bool data_inode = btrfs_ino(BTRFS_I(inode))
2825 != BTRFS_BTREE_INODE_OBJECTID;
Arne Jansen507903b2011-04-06 10:02:20 +00002826
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002827 btrfs_debug(fs_info,
2828 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002829 (u64)bio->bi_iter.bi_sector, bio->bi_status,
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002830 io_bio->mirror_num);
Josef Bacika71754f2013-06-17 17:14:39 -04002831 tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002832 failure_tree = &BTRFS_I(inode)->io_failure_tree;
David Woodhouse902b22f2008-08-20 08:51:49 -04002833
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002834 /* We always issue full-page reads, but if some block
2835 * in a page fails to read, blk_update_request() will
2836 * advance bv_offset and adjust bv_len to compensate.
2837 * Print a warning for nonzero offsets, and an error
2838 * if they don't add up to a full page. */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002839 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2840 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002841 btrfs_err(fs_info,
2842 "partial page read in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002843 bvec->bv_offset, bvec->bv_len);
2844 else
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002845 btrfs_info(fs_info,
2846 "incomplete page read in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002847 bvec->bv_offset, bvec->bv_len);
2848 }
Chris Masond1310b22008-01-24 16:13:08 -05002849
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002850 start = page_offset(page);
2851 end = start + bvec->bv_offset + bvec->bv_len - 1;
Miao Xiefacc8a222013-07-25 19:22:34 +08002852 len = bvec->bv_len;
Chris Masond1310b22008-01-24 16:13:08 -05002853
Chris Mason9be33952013-05-17 18:30:14 -04002854 mirror = io_bio->mirror_num;
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002855 if (likely(uptodate)) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002856 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2857 page, start, end,
2858 mirror);
Stefan Behrens5ee08442012-08-27 08:30:03 -06002859 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002860 uptodate = 0;
Stefan Behrens5ee08442012-08-27 08:30:03 -06002861 else
Josef Bacik7870d082017-05-05 11:57:15 -04002862 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2863 failure_tree, tree, start,
2864 page,
2865 btrfs_ino(BTRFS_I(inode)), 0);
Chris Masond1310b22008-01-24 16:13:08 -05002866 }
Josef Bacikea466792012-03-26 21:57:36 -04002867
Miao Xief2a09da2013-07-25 19:22:33 +08002868 if (likely(uptodate))
2869 goto readpage_ok;
2870
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002871 if (data_inode) {
Liu Bo9d0d1c82017-03-24 15:04:50 -07002872
2873 /*
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002874 * The generic bio_readpage_error handles errors the
2875 * following way: If possible, new read requests are
2876 * created and submitted and will end up in
2877 * end_bio_extent_readpage as well (if we're lucky,
2878 * not in the !uptodate case). In that case it returns
2879 * 0 and we just go on with the next page in our bio.
2880 * If it can't handle the error it will return -EIO and
2881 * we remain responsible for that page.
Liu Bo9d0d1c82017-03-24 15:04:50 -07002882 */
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002883 ret = bio_readpage_error(bio, offset, page, start, end,
2884 mirror);
2885 if (ret == 0) {
2886 uptodate = !bio->bi_status;
2887 offset += len;
2888 continue;
2889 }
2890 } else {
2891 struct extent_buffer *eb;
2892
2893 eb = (struct extent_buffer *)page->private;
2894 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2895 eb->read_mirror = mirror;
2896 atomic_dec(&eb->io_pages);
2897 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2898 &eb->bflags))
2899 btree_readahead_hook(eb, -EIO);
Chris Mason7e383262008-04-09 16:28:12 -04002900 }
Miao Xief2a09da2013-07-25 19:22:33 +08002901readpage_ok:
Miao Xie883d0de2013-07-25 19:22:35 +08002902 if (likely(uptodate)) {
Josef Bacika71754f2013-06-17 17:14:39 -04002903 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002904 pgoff_t end_index = i_size >> PAGE_SHIFT;
Liu Boa583c022014-08-19 23:32:22 +08002905 unsigned off;
Josef Bacika71754f2013-06-17 17:14:39 -04002906
2907 /* Zero out the end if this page straddles i_size */
Johannes Thumshirn70730172018-12-05 15:23:03 +01002908 off = offset_in_page(i_size);
Liu Boa583c022014-08-19 23:32:22 +08002909 if (page->index == end_index && off)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002910 zero_user_segment(page, off, PAGE_SIZE);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002911 SetPageUptodate(page);
Chris Mason70dec802008-01-29 09:59:12 -05002912 } else {
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002913 ClearPageUptodate(page);
2914 SetPageError(page);
Chris Mason70dec802008-01-29 09:59:12 -05002915 }
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002916 unlock_page(page);
Miao Xiefacc8a222013-07-25 19:22:34 +08002917 offset += len;
Miao Xie883d0de2013-07-25 19:22:35 +08002918
2919 if (unlikely(!uptodate)) {
2920 if (extent_len) {
2921 endio_readpage_release_extent(tree,
2922 extent_start,
2923 extent_len, 1);
2924 extent_start = 0;
2925 extent_len = 0;
2926 }
2927 endio_readpage_release_extent(tree, start,
2928 end - start + 1, 0);
2929 } else if (!extent_len) {
2930 extent_start = start;
2931 extent_len = end + 1 - start;
2932 } else if (extent_start + extent_len == start) {
2933 extent_len += end + 1 - start;
2934 } else {
2935 endio_readpage_release_extent(tree, extent_start,
2936 extent_len, uptodate);
2937 extent_start = start;
2938 extent_len = end + 1 - start;
2939 }
Kent Overstreet2c30c712013-11-07 12:20:26 -08002940 }
Chris Masond1310b22008-01-24 16:13:08 -05002941
Miao Xie883d0de2013-07-25 19:22:35 +08002942 if (extent_len)
2943 endio_readpage_release_extent(tree, extent_start, extent_len,
2944 uptodate);
David Sterbab3a0dd52018-11-22 17:16:49 +01002945 btrfs_io_bio_free_csum(io_bio);
Chris Masond1310b22008-01-24 16:13:08 -05002946 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002947}
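/*
 * Editorial note (added): the extent_start/extent_len pair above batches the
 * unlocking of contiguous, successfully read ranges so that
 * endio_readpage_release_extent() runs once per contiguous run rather than
 * once per page; an error flushes the pending run and then unlocks the failed
 * range separately with uptodate == 0.
 */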
2948
Chris Mason9be33952013-05-17 18:30:14 -04002949/*
David Sterba184f9992017-06-12 17:29:39 +02002950 * Initialize the members up to but not including 'bio'. Use after allocating a
2951 * new bio with bio_alloc_bioset, which does not initialize the bytes
2952 * outside of 'bio' because use of __GFP_ZERO is not supported there.
Chris Mason9be33952013-05-17 18:30:14 -04002953 */
David Sterba184f9992017-06-12 17:29:39 +02002954static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
Chris Masond1310b22008-01-24 16:13:08 -05002955{
David Sterba184f9992017-06-12 17:29:39 +02002956 memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2957}
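/*
 * Layout assumption behind the offsetof() trick above (sketch, not taken from
 * this file): the embedded struct bio is expected to be the last member of
 * struct btrfs_io_bio, roughly
 *
 *	struct btrfs_io_bio {
 *		unsigned int mirror_num;
 *		...				(csum, iter, etc.)
 *		struct bio bio;			(must remain last)
 *	};
 *
 * so zeroing up to offsetof(struct btrfs_io_bio, bio) clears every btrfs
 * specific field while leaving the freshly allocated bio itself untouched.
 * The btrfs_io_bio() accessor used throughout this file then amounts to
 * container_of(bio, struct btrfs_io_bio, bio).
 */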
2958
2959/*
David Sterba6e707bc2017-06-02 17:26:26 +02002960 * The following helpers allocate a bio. As they're backed by a bioset, they'll
2961 * never fail. We return a plain bio, but you can call btrfs_io_bio() for the
2962 * appropriate container_of magic.
Chris Masond1310b22008-01-24 16:13:08 -05002963 */
David Sterbae749af442019-06-18 20:00:16 +02002964struct bio *btrfs_bio_alloc(u64 first_byte)
Chris Masond1310b22008-01-24 16:13:08 -05002965{
2966 struct bio *bio;
2967
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04002968 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
David Sterbac821e7f32017-06-02 18:35:36 +02002969 bio->bi_iter.bi_sector = first_byte >> 9;
David Sterba184f9992017-06-12 17:29:39 +02002970 btrfs_io_bio_init(btrfs_io_bio(bio));
Chris Masond1310b22008-01-24 16:13:08 -05002971 return bio;
2972}
2973
David Sterba8b6c1d52017-06-02 17:48:13 +02002974struct bio *btrfs_bio_clone(struct bio *bio)
Chris Mason9be33952013-05-17 18:30:14 -04002975{
Miao Xie23ea8e52014-09-12 18:43:54 +08002976 struct btrfs_io_bio *btrfs_bio;
2977 struct bio *new;
Chris Mason9be33952013-05-17 18:30:14 -04002978
David Sterba6e707bc2017-06-02 17:26:26 +02002979 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04002980 new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
David Sterba6e707bc2017-06-02 17:26:26 +02002981 btrfs_bio = btrfs_io_bio(new);
David Sterba184f9992017-06-12 17:29:39 +02002982 btrfs_io_bio_init(btrfs_bio);
David Sterba6e707bc2017-06-02 17:26:26 +02002983 btrfs_bio->iter = bio->bi_iter;
Miao Xie23ea8e52014-09-12 18:43:54 +08002984 return new;
2985}
Chris Mason9be33952013-05-17 18:30:14 -04002986
David Sterbac5e4c3d2017-06-12 17:29:41 +02002987struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
Chris Mason9be33952013-05-17 18:30:14 -04002988{
Miao Xiefacc8a222013-07-25 19:22:34 +08002989 struct bio *bio;
2990
David Sterba6e707bc2017-06-02 17:26:26 +02002991 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04002992 bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
David Sterba184f9992017-06-12 17:29:39 +02002993 btrfs_io_bio_init(btrfs_io_bio(bio));
Miao Xiefacc8a222013-07-25 19:22:34 +08002994 return bio;
Chris Mason9be33952013-05-17 18:30:14 -04002995}
2996
Liu Boe4770942017-05-16 10:57:14 -07002997struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
Liu Bo2f8e9142017-05-15 17:43:31 -07002998{
2999 struct bio *bio;
3000 struct btrfs_io_bio *btrfs_bio;
3001
3002 /* this will never fail when it's backed by a bioset */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04003003 bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
Liu Bo2f8e9142017-05-15 17:43:31 -07003004 ASSERT(bio);
3005
3006 btrfs_bio = btrfs_io_bio(bio);
David Sterba184f9992017-06-12 17:29:39 +02003007 btrfs_io_bio_init(btrfs_bio);
Liu Bo2f8e9142017-05-15 17:43:31 -07003008
3009 bio_trim(bio, offset >> 9, size >> 9);
Liu Bo17347ce2017-05-15 15:33:27 -07003010 btrfs_bio->iter = bio->bi_iter;
Liu Bo2f8e9142017-05-15 17:43:31 -07003011 return bio;
3012}
Chris Mason9be33952013-05-17 18:30:14 -04003013
David Sterba4b81ba42017-06-06 19:14:26 +02003014/*
3015 * @opf: bio REQ_OP_* and REQ_* flags as one value
David Sterbab8b3d622017-06-12 19:50:41 +02003016 * @wbc: optional writeback control for io accounting
3017 * @page: page to add to the bio
3018 * @pg_offset: offset of the new bio or to check whether we are adding
3019 * a contiguous page to the previous one
3020 * @size: portion of page that we want to write
3021 * @offset: starting offset in the page
David Sterba5c2b1fd2017-06-06 19:22:55 +02003022 * @bio_ret: must be valid pointer, newly allocated bio will be stored there
David Sterbab8b3d622017-06-12 19:50:41 +02003023 * @end_io_func: end_io callback for new bio
3024 * @mirror_num: desired mirror to read/write
3025 * @prev_bio_flags: flags of previous bio to see if we can merge the current one
3026 * @bio_flags: flags of the current bio to see if we can merge them
David Sterba4b81ba42017-06-06 19:14:26 +02003027 */
David Sterba0ceb34b2020-02-05 19:09:28 +01003028static int submit_extent_page(unsigned int opf,
Chris Masonda2f0f72015-07-02 13:57:22 -07003029 struct writeback_control *wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02003030 struct page *page, u64 offset,
David Sterba6c5a4e22017-10-04 17:10:34 +02003031 size_t size, unsigned long pg_offset,
Chris Masond1310b22008-01-24 16:13:08 -05003032 struct bio **bio_ret,
Chris Masonf1885912008-04-09 16:28:12 -04003033 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04003034 int mirror_num,
3035 unsigned long prev_bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003036 unsigned long bio_flags,
3037 bool force_bio_submit)
Chris Masond1310b22008-01-24 16:13:08 -05003038{
3039 int ret = 0;
3040 struct bio *bio;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003041 size_t page_size = min_t(size_t, size, PAGE_SIZE);
David Sterba6273b7f2017-10-04 17:30:11 +02003042 sector_t sector = offset >> 9;
David Sterba0ceb34b2020-02-05 19:09:28 +01003043 struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
Chris Masond1310b22008-01-24 16:13:08 -05003044
David Sterba5c2b1fd2017-06-06 19:22:55 +02003045 ASSERT(bio_ret);
3046
3047 if (*bio_ret) {
David Sterba0c8508a2017-06-12 20:00:43 +02003048 bool contig;
3049 bool can_merge = true;
3050
Chris Masond1310b22008-01-24 16:13:08 -05003051 bio = *bio_ret;
David Sterba0c8508a2017-06-12 20:00:43 +02003052 if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
Kent Overstreet4f024f32013-10-11 15:44:27 -07003053 contig = bio->bi_iter.bi_sector == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04003054 else
Kent Overstreetf73a1c72012-09-25 15:05:12 -07003055 contig = bio_end_sector(bio) == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04003056
Nikolay Borisovda12fe52018-11-27 20:57:58 +02003057 ASSERT(tree->ops);
3058 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
David Sterba0c8508a2017-06-12 20:00:43 +02003059 can_merge = false;
3060
3061 if (prev_bio_flags != bio_flags || !contig || !can_merge ||
Filipe Manana005efed2015-09-14 09:09:31 +01003062 force_bio_submit ||
David Sterba6c5a4e22017-10-04 17:10:34 +02003063 bio_add_page(bio, page, page_size, pg_offset) < page_size) {
Mike Christie1f7ad752016-06-05 14:31:51 -05003064 ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
Naohiro Aota289454a2015-01-06 01:01:03 +09003065 if (ret < 0) {
3066 *bio_ret = NULL;
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003067 return ret;
Naohiro Aota289454a2015-01-06 01:01:03 +09003068 }
Chris Masond1310b22008-01-24 16:13:08 -05003069 bio = NULL;
3070 } else {
Chris Masonda2f0f72015-07-02 13:57:22 -07003071 if (wbc)
Tejun Heo34e51a52019-06-27 13:39:49 -07003072 wbc_account_cgroup_owner(wbc, page, page_size);
Chris Masond1310b22008-01-24 16:13:08 -05003073 return 0;
3074 }
3075 }
Chris Masonc8b97812008-10-29 14:49:59 -04003076
David Sterbae749af442019-06-18 20:00:16 +02003077 bio = btrfs_bio_alloc(offset);
David Sterba6c5a4e22017-10-04 17:10:34 +02003078 bio_add_page(bio, page, page_size, pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05003079 bio->bi_end_io = end_io_func;
3080 bio->bi_private = tree;
Jens Axboee6959b92017-06-27 11:51:28 -06003081 bio->bi_write_hint = page->mapping->host->i_write_hint;
David Sterba4b81ba42017-06-06 19:14:26 +02003082 bio->bi_opf = opf;
Chris Masonda2f0f72015-07-02 13:57:22 -07003083 if (wbc) {
David Sterba429aebc2019-11-18 23:27:55 +01003084 struct block_device *bdev;
3085
3086 bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
3087 bio_set_dev(bio, bdev);
Chris Masonda2f0f72015-07-02 13:57:22 -07003088 wbc_init_bio(wbc, bio);
Tejun Heo34e51a52019-06-27 13:39:49 -07003089 wbc_account_cgroup_owner(wbc, page, page_size);
Chris Masonda2f0f72015-07-02 13:57:22 -07003090 }
Chris Mason70dec802008-01-29 09:59:12 -05003091
David Sterba5c2b1fd2017-06-06 19:22:55 +02003092 *bio_ret = bio;
Chris Masond1310b22008-01-24 16:13:08 -05003093
3094 return ret;
3095}
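/*
 * Editorial note (added): an existing *bio_ret is reused only if the new page
 * is physically contiguous with the bio's current end, the bio flags match,
 * the stripe-boundary check allows it, the caller did not force a submit and
 * bio_add_page() can still take the full range; otherwise the pending bio is
 * submitted first and a fresh bio is started for this page.
 */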
3096
Eric Sandeen48a3b632013-04-25 20:41:01 +00003097static void attach_extent_buffer_page(struct extent_buffer *eb,
3098 struct page *page)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003099{
3100 if (!PagePrivate(page)) {
3101 SetPagePrivate(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003102 get_page(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003103 set_page_private(page, (unsigned long)eb);
3104 } else {
3105 WARN_ON(page->private != (unsigned long)eb);
3106 }
3107}
3108
Chris Masond1310b22008-01-24 16:13:08 -05003109void set_page_extent_mapped(struct page *page)
3110{
3111 if (!PagePrivate(page)) {
3112 SetPagePrivate(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003113 get_page(page);
Chris Mason6af118ce2008-07-22 11:18:07 -04003114 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05003115 }
3116}
3117
Miao Xie125bac012013-07-25 19:22:37 +08003118static struct extent_map *
3119__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3120 u64 start, u64 len, get_extent_t *get_extent,
3121 struct extent_map **em_cached)
3122{
3123 struct extent_map *em;
3124
3125 if (em_cached && *em_cached) {
3126 em = *em_cached;
Filipe Mananacbc0e922014-02-25 14:15:12 +00003127 if (extent_map_in_tree(em) && start >= em->start &&
Miao Xie125bac012013-07-25 19:22:37 +08003128 start < extent_map_end(em)) {
Elena Reshetova490b54d2017-03-03 10:55:12 +02003129 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003130 return em;
3131 }
3132
3133 free_extent_map(em);
3134 *em_cached = NULL;
3135 }
3136
Omar Sandoval39b07b52019-12-02 17:34:23 -08003137 em = get_extent(BTRFS_I(inode), page, pg_offset, start, len);
Miao Xie125bac012013-07-25 19:22:37 +08003138 if (em_cached && !IS_ERR_OR_NULL(em)) {
3139 BUG_ON(*em_cached);
Elena Reshetova490b54d2017-03-03 10:55:12 +02003140 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003141 *em_cached = em;
3142 }
3143 return em;
3144}
Chris Masond1310b22008-01-24 16:13:08 -05003145/*
3146 * basic readpage implementation. Locked extent state structs are inserted
3147 * into the tree and removed when the IO is done (by the end_io
3148 * handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003149 * XXX JDM: This needs looking at to ensure proper page locking
Liu Bobaf863b2016-07-11 10:39:07 -07003150 * return 0 on success, otherwise return error
Chris Masond1310b22008-01-24 16:13:08 -05003151 */
David Sterbaf657a312020-02-05 19:09:42 +01003152static int __do_readpage(struct page *page,
Miao Xie99740902013-07-25 19:22:36 +08003153 get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08003154 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08003155 struct bio **bio, int mirror_num,
David Sterbaf1c77c52017-06-06 19:03:49 +02003156 unsigned long *bio_flags, unsigned int read_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003157 u64 *prev_em_start)
Chris Masond1310b22008-01-24 16:13:08 -05003158{
3159 struct inode *inode = page->mapping->host;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003160 u64 start = page_offset(page);
David Sterba8eec8292017-06-06 19:50:13 +02003161 const u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003162 u64 cur = start;
3163 u64 extent_offset;
3164 u64 last_byte = i_size_read(inode);
3165 u64 block_start;
3166 u64 cur_end;
Chris Masond1310b22008-01-24 16:13:08 -05003167 struct extent_map *em;
Liu Bobaf863b2016-07-11 10:39:07 -07003168 int ret = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003169 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02003170 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003171 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04003172 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05003173 size_t blocksize = inode->i_sb->s_blocksize;
Filipe Manana7f042a82016-01-27 19:17:20 +00003174 unsigned long this_bio_flag = 0;
David Sterbaf657a312020-02-05 19:09:42 +01003175 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
David Sterbaae6957e2020-02-05 19:09:30 +01003176
Chris Masond1310b22008-01-24 16:13:08 -05003177 set_page_extent_mapped(page);
3178
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003179 if (!PageUptodate(page)) {
3180 if (cleancache_get_page(page) == 0) {
3181 BUG_ON(blocksize != PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08003182 unlock_extent(tree, start, end);
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003183 goto out;
3184 }
3185 }
3186
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003187 if (page->index == last_byte >> PAGE_SHIFT) {
Chris Masonc8b97812008-10-29 14:49:59 -04003188 char *userpage;
Johannes Thumshirn70730172018-12-05 15:23:03 +01003189 size_t zero_offset = offset_in_page(last_byte);
Chris Masonc8b97812008-10-29 14:49:59 -04003190
3191 if (zero_offset) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003192 iosize = PAGE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08003193 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04003194 memset(userpage + zero_offset, 0, iosize);
3195 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003196 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04003197 }
3198 }
Chris Masond1310b22008-01-24 16:13:08 -05003199 while (cur <= end) {
Filipe Manana005efed2015-09-14 09:09:31 +01003200 bool force_bio_submit = false;
David Sterba6273b7f2017-10-04 17:30:11 +02003201 u64 offset;
Josef Bacikc8f2f242013-02-11 11:33:00 -05003202
Chris Masond1310b22008-01-24 16:13:08 -05003203 if (cur >= last_byte) {
3204 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003205 struct extent_state *cached = NULL;
3206
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003207 iosize = PAGE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08003208 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003209 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003210 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003211 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003212 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003213 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003214 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003215 cur + iosize - 1, &cached);
Chris Masond1310b22008-01-24 16:13:08 -05003216 break;
3217 }
Miao Xie125bac012013-07-25 19:22:37 +08003218 em = __get_extent_map(inode, page, pg_offset, cur,
3219 end - cur + 1, get_extent, em_cached);
David Sterbac7040052011-04-19 18:00:01 +02003220 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003221 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003222 unlock_extent(tree, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003223 break;
3224 }
Chris Masond1310b22008-01-24 16:13:08 -05003225 extent_offset = cur - em->start;
3226 BUG_ON(extent_map_end(em) <= cur);
3227 BUG_ON(end < cur);
3228
Li Zefan261507a02010-12-17 14:21:50 +08003229 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Mark Fasheh4b384312013-08-06 11:42:50 -07003230 this_bio_flag |= EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08003231 extent_set_compress_type(&this_bio_flag,
3232 em->compress_type);
3233 }
Chris Masonc8b97812008-10-29 14:49:59 -04003234
Chris Masond1310b22008-01-24 16:13:08 -05003235 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3236 cur_end = min(extent_map_end(em) - 1, end);
Qu Wenruofda28322013-02-26 08:10:22 +00003237 iosize = ALIGN(iosize, blocksize);
Chris Masonc8b97812008-10-29 14:49:59 -04003238 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3239 disk_io_size = em->block_len;
David Sterba6273b7f2017-10-04 17:30:11 +02003240 offset = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003241 } else {
David Sterba6273b7f2017-10-04 17:30:11 +02003242 offset = em->block_start + extent_offset;
Chris Masonc8b97812008-10-29 14:49:59 -04003243 disk_io_size = iosize;
3244 }
Chris Masond1310b22008-01-24 16:13:08 -05003245 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04003246 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3247 block_start = EXTENT_MAP_HOLE;
Filipe Manana005efed2015-09-14 09:09:31 +01003248
3249 /*
3250 * If we have a file range that points to a compressed extent
3251 * and it's followed by a consecutive file range that points to
3252 * the same compressed extent (possibly with a different
3253 * offset and/or length, so it either points to the whole extent
3254 * or only part of it), we must make sure we do not submit a
3255 * single bio to populate the pages for the 2 ranges because
3256 * this makes the compressed extent read zero out the pages
3257 * belonging to the 2nd range. Imagine the following scenario:
3258 *
3259 * File layout
3260 * [0 - 8K] [8K - 24K]
3261 * | |
3262 * | |
3263 * points to extent X, points to extent X,
3264 * offset 4K, length of 8K offset 0, length 16K
3265 *
3266 * [extent X, compressed length = 4K uncompressed length = 16K]
3267 *
3268 * If the bio to read the compressed extent covers both ranges,
3269 * it will decompress extent X into the pages belonging to the
3270 * first range and then it will stop, zeroing out the remaining
3271 * pages that belong to the other range that points to extent X.
3272 * So here we make sure we submit 2 bios, one for the first
3273 * range and another one for the second range. Both will target
3274 * the same physical extent from disk, but we can't currently
3275 * make the compressed bio endio callback populate the pages
3276 * for both ranges because each compressed bio is tightly
3277 * coupled with a single extent map, and each range can have
3278 * an extent map with a different offset value relative to the
3279 * uncompressed data of our extent and different lengths. This
3280 * is a corner case so we prioritize correctness over
3281 * non-optimal behavior (submitting 2 bios for the same extent).
3282 */
3283 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3284 prev_em_start && *prev_em_start != (u64)-1 &&
Filipe Manana8e928212019-02-14 15:17:20 +00003285 *prev_em_start != em->start)
Filipe Manana005efed2015-09-14 09:09:31 +01003286 force_bio_submit = true;
3287
3288 if (prev_em_start)
Filipe Manana8e928212019-02-14 15:17:20 +00003289 *prev_em_start = em->start;
Filipe Manana005efed2015-09-14 09:09:31 +01003290
Chris Masond1310b22008-01-24 16:13:08 -05003291 free_extent_map(em);
3292 em = NULL;
3293
3294 /* we've found a hole, just zero and go on */
3295 if (block_start == EXTENT_MAP_HOLE) {
3296 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003297 struct extent_state *cached = NULL;
3298
Cong Wang7ac687d2011-11-25 23:14:28 +08003299 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003300 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003301 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003302 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003303
3304 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003305 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003306 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003307 cur + iosize - 1, &cached);
Chris Masond1310b22008-01-24 16:13:08 -05003308 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003309 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003310 continue;
3311 }
3312 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04003313 if (test_range_bit(tree, cur, cur_end,
3314 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04003315 check_page_uptodate(tree, page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003316 unlock_extent(tree, cur, cur + iosize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05003317 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003318 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003319 continue;
3320 }
Chris Mason70dec802008-01-29 09:59:12 -05003321 /* we have an inline extent but it didn't get marked up
3322 * to date. Error out
3323 */
3324 if (block_start == EXTENT_MAP_INLINE) {
3325 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003326 unlock_extent(tree, cur, cur + iosize - 1);
Chris Mason70dec802008-01-29 09:59:12 -05003327 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003328 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05003329 continue;
3330 }
Chris Masond1310b22008-01-24 16:13:08 -05003331
David Sterba0ceb34b2020-02-05 19:09:28 +01003332 ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
David Sterba6273b7f2017-10-04 17:30:11 +02003333 page, offset, disk_io_size,
David Sterbafa17ed02019-10-03 17:29:05 +02003334 pg_offset, bio,
Chris Masonc8b97812008-10-29 14:49:59 -04003335 end_bio_extent_readpage, mirror_num,
3336 *bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003337 this_bio_flag,
3338 force_bio_submit);
Josef Bacikc8f2f242013-02-11 11:33:00 -05003339 if (!ret) {
3340 nr++;
3341 *bio_flags = this_bio_flag;
3342 } else {
Chris Masond1310b22008-01-24 16:13:08 -05003343 SetPageError(page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003344 unlock_extent(tree, cur, cur + iosize - 1);
Liu Bobaf863b2016-07-11 10:39:07 -07003345 goto out;
Josef Bacikedd33c92012-10-05 16:40:32 -04003346 }
Chris Masond1310b22008-01-24 16:13:08 -05003347 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003348 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003349 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003350out:
Chris Masond1310b22008-01-24 16:13:08 -05003351 if (!nr) {
3352 if (!PageError(page))
3353 SetPageUptodate(page);
3354 unlock_page(page);
3355 }
Liu Bobaf863b2016-07-11 10:39:07 -07003356 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003357}
3358
David Sterbab6660e82020-02-05 19:09:40 +01003359static inline void contiguous_readpages(struct page *pages[], int nr_pages,
Miao Xie99740902013-07-25 19:22:36 +08003360 u64 start, u64 end,
Miao Xie125bac012013-07-25 19:22:37 +08003361 struct extent_map **em_cached,
Nikolay Borisovd3fac6b2017-10-24 11:50:39 +03003362 struct bio **bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05003363 unsigned long *bio_flags,
Filipe Manana808f80b2015-09-28 09:56:26 +01003364 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003365{
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003366 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
Miao Xie99740902013-07-25 19:22:36 +08003367 int index;
3368
David Sterbab272ae22020-02-05 19:09:33 +01003369 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003370
3371 for (index = 0; index < nr_pages; index++) {
David Sterbaf657a312020-02-05 19:09:42 +01003372 __do_readpage(pages[index], btrfs_get_extent, em_cached,
Jens Axboe5e9d3982018-08-17 15:45:39 -07003373 bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003374 put_page(pages[index]);
Miao Xie99740902013-07-25 19:22:36 +08003375 }
3376}
3377
David Sterba0d44fea2020-02-05 19:09:37 +01003378static int __extent_read_full_page(struct page *page,
Miao Xie99740902013-07-25 19:22:36 +08003379 get_extent_t *get_extent,
3380 struct bio **bio, int mirror_num,
David Sterbaf1c77c52017-06-06 19:03:49 +02003381 unsigned long *bio_flags,
3382 unsigned int read_flags)
Miao Xie99740902013-07-25 19:22:36 +08003383{
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003384 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
Miao Xie99740902013-07-25 19:22:36 +08003385 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003386 u64 end = start + PAGE_SIZE - 1;
Miao Xie99740902013-07-25 19:22:36 +08003387 int ret;
3388
David Sterbab272ae22020-02-05 19:09:33 +01003389 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003390
David Sterbaf657a312020-02-05 19:09:42 +01003391 ret = __do_readpage(page, get_extent, NULL, bio, mirror_num,
Mike Christie1f7ad752016-06-05 14:31:51 -05003392 bio_flags, read_flags, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003393 return ret;
3394}
3395
David Sterba71ad38b2020-02-05 19:09:35 +01003396int extent_read_full_page(struct page *page, get_extent_t *get_extent,
3397 int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05003398{
3399 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04003400 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003401 int ret;
3402
David Sterba0d44fea2020-02-05 19:09:37 +01003403 ret = __extent_read_full_page(page, get_extent, &bio, mirror_num,
Mike Christie1f7ad752016-06-05 14:31:51 -05003404 &bio_flags, 0);
Chris Masond1310b22008-01-24 16:13:08 -05003405 if (bio)
Mike Christie1f7ad752016-06-05 14:31:51 -05003406 ret = submit_one_bio(bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003407 return ret;
3408}
Chris Masond1310b22008-01-24 16:13:08 -05003409
David Sterba3d4b9492017-02-10 19:33:41 +01003410static void update_nr_written(struct writeback_control *wbc,
Liu Boa91326672016-03-07 16:56:21 -08003411 unsigned long nr_written)
Chris Mason11c83492009-04-20 15:50:09 -04003412{
3413 wbc->nr_to_write -= nr_written;
Chris Mason11c83492009-04-20 15:50:09 -04003414}
3415
Chris Masond1310b22008-01-24 16:13:08 -05003416/*
Chris Mason40f76582014-05-21 13:35:51 -07003417 * helper for __extent_writepage, doing all of the delayed allocation setup.
3418 *
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003419 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
Chris Mason40f76582014-05-21 13:35:51 -07003420 * to write the page (copy into inline extent). In this case the IO has
3421 * been started and the page is already unlocked.
3422 *
3423 * This returns 0 if all went well (page still locked)
3424 * This returns < 0 if there were errors (page still locked)
Chris Masond1310b22008-01-24 16:13:08 -05003425 */
Chris Mason40f76582014-05-21 13:35:51 -07003426static noinline_for_stack int writepage_delalloc(struct inode *inode,
Nikolay Borisov8cc02372018-11-08 10:18:07 +02003427 struct page *page, struct writeback_control *wbc,
3428 u64 delalloc_start, unsigned long *nr_written)
Chris Masond1310b22008-01-24 16:13:08 -05003429{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003430 u64 page_end = delalloc_start + PAGE_SIZE - 1;
Lu Fengqi3522e902018-11-29 11:33:38 +08003431 bool found;
Chris Mason40f76582014-05-21 13:35:51 -07003432 u64 delalloc_to_write = 0;
3433 u64 delalloc_end = 0;
3434 int ret;
3435 int page_started = 0;
3436
Chris Mason40f76582014-05-21 13:35:51 -07003437
3438 while (delalloc_end < page_end) {
Goldwyn Rodrigues99780592019-06-21 10:02:54 -05003439 found = find_lock_delalloc_range(inode, page,
Chris Mason40f76582014-05-21 13:35:51 -07003440 &delalloc_start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03003441 &delalloc_end);
Lu Fengqi3522e902018-11-29 11:33:38 +08003442 if (!found) {
Chris Mason40f76582014-05-21 13:35:51 -07003443 delalloc_start = delalloc_end + 1;
3444 continue;
3445 }
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003446 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3447 delalloc_end, &page_started, nr_written, wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003448 if (ret) {
3449 SetPageError(page);
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003450 /*
3451 * btrfs_run_delalloc_range should return < 0 for error
3452 * but just in case, we use > 0 here meaning the IO is
3453 * started, so we don't want to return > 0 unless
3454 * things are going well.
Chris Mason40f76582014-05-21 13:35:51 -07003455 */
3456 ret = ret < 0 ? ret : -EIO;
3457 goto done;
3458 }
3459 /*
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003460 * delalloc_end is already one less than the total length, so
3461 * we don't subtract one from PAGE_SIZE
Chris Mason40f76582014-05-21 13:35:51 -07003462 */
3463 delalloc_to_write += (delalloc_end - delalloc_start +
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003464 PAGE_SIZE) >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003465 delalloc_start = delalloc_end + 1;
3466 }
3467 if (wbc->nr_to_write < delalloc_to_write) {
3468 int thresh = 8192;
3469
3470 if (delalloc_to_write < thresh * 2)
3471 thresh = delalloc_to_write;
3472 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3473 thresh);
3474 }
3475
3476 /* did the fill delalloc function already unlock and start
3477 * the IO?
3478 */
3479 if (page_started) {
3480 /*
3481 * we've unlocked the page, so we can't update
3482 * the mapping's writeback index, just update
3483 * nr_to_write.
3484 */
3485 wbc->nr_to_write -= *nr_written;
3486 return 1;
3487 }
3488
3489 ret = 0;
3490
3491done:
3492 return ret;
3493}
3494
3495/*
3496 * helper for __extent_writepage. This calls the writepage start hooks,
3497 * and does the loop to map the page into extents and bios.
3498 *
3499 * We return 1 if the IO is started and the page is unlocked,
3500 * 0 if all went well (page still locked)
3501 * < 0 if there were errors (page still locked)
3502 */
3503static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3504 struct page *page,
3505 struct writeback_control *wbc,
3506 struct extent_page_data *epd,
3507 loff_t i_size,
3508 unsigned long nr_written,
David Sterba57e5ffe2019-10-29 18:28:55 +01003509 int *nr_ret)
Chris Mason40f76582014-05-21 13:35:51 -07003510{
David Sterba45b08402020-02-05 19:09:26 +01003511 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003512 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003513 u64 page_end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003514 u64 end;
3515 u64 cur = start;
3516 u64 extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003517 u64 block_start;
3518 u64 iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003519 struct extent_map *em;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003520 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003521 size_t blocksize;
Chris Mason40f76582014-05-21 13:35:51 -07003522 int ret = 0;
3523 int nr = 0;
David Sterba57e5ffe2019-10-29 18:28:55 +01003524 const unsigned int write_flags = wbc_to_write_flags(wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003525 bool compressed;
Chris Masond1310b22008-01-24 16:13:08 -05003526
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003527 ret = btrfs_writepage_cow_fixup(page, start, page_end);
3528 if (ret) {
3529 /* Fixup worker will requeue */
Josef Bacik5ab58052020-01-21 11:51:43 -05003530 redirty_page_for_writepage(wbc, page);
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003531 update_nr_written(wbc, nr_written);
3532 unlock_page(page);
3533 return 1;
Chris Mason247e7432008-07-17 12:53:51 -04003534 }
3535
Chris Mason11c83492009-04-20 15:50:09 -04003536 /*
3537 * we don't want to touch the inode after unlocking the page,
3538 * so we update the mapping writeback index now
3539 */
David Sterba3d4b9492017-02-10 19:33:41 +01003540 update_nr_written(wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05003541
Chris Masond1310b22008-01-24 16:13:08 -05003542 end = page_end;
Chris Masond1310b22008-01-24 16:13:08 -05003543 blocksize = inode->i_sb->s_blocksize;
3544
3545 while (cur <= end) {
Chris Mason40f76582014-05-21 13:35:51 -07003546 u64 em_end;
David Sterba6273b7f2017-10-04 17:30:11 +02003547 u64 offset;
David Sterba58409ed2016-05-04 11:46:10 +02003548
Chris Mason40f76582014-05-21 13:35:51 -07003549 if (cur >= i_size) {
Nikolay Borisov7087a9d2018-11-01 14:09:48 +02003550 btrfs_writepage_endio_finish_ordered(page, cur,
Nikolay Borisovc6297322018-11-08 10:18:08 +02003551 page_end, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003552 break;
3553 }
Omar Sandoval39b07b52019-12-02 17:34:23 -08003554 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur,
3555 end - cur + 1);
David Sterbac7040052011-04-19 18:00:01 +02003556 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003557 SetPageError(page);
Filipe Manana61391d52014-05-09 17:17:40 +01003558 ret = PTR_ERR_OR_ZERO(em);
Chris Masond1310b22008-01-24 16:13:08 -05003559 break;
3560 }
3561
3562 extent_offset = cur - em->start;
Chris Mason40f76582014-05-21 13:35:51 -07003563 em_end = extent_map_end(em);
3564 BUG_ON(em_end <= cur);
Chris Masond1310b22008-01-24 16:13:08 -05003565 BUG_ON(end < cur);
Chris Mason40f76582014-05-21 13:35:51 -07003566 iosize = min(em_end - cur, end - cur + 1);
Qu Wenruofda28322013-02-26 08:10:22 +00003567 iosize = ALIGN(iosize, blocksize);
David Sterba6273b7f2017-10-04 17:30:11 +02003568 offset = em->block_start + extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003569 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003570 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05003571 free_extent_map(em);
3572 em = NULL;
3573
Chris Masonc8b97812008-10-29 14:49:59 -04003574 /*
3575 * compressed and inline extents are written through other
3576 * paths in the FS
3577 */
3578 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05003579 block_start == EXTENT_MAP_INLINE) {
Omar Sandovalc8b04032019-12-02 17:34:24 -08003580 if (compressed)
Chris Masonc8b97812008-10-29 14:49:59 -04003581 nr++;
Omar Sandovalc8b04032019-12-02 17:34:24 -08003582 else
3583 btrfs_writepage_endio_finish_ordered(page, cur,
3584 cur + iosize - 1, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04003585 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003586 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003587 continue;
3588 }
Chris Masonc8b97812008-10-29 14:49:59 -04003589
David Sterba5cdc84b2018-07-18 20:32:52 +02003590 btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
David Sterba58409ed2016-05-04 11:46:10 +02003591 if (!PageWriteback(page)) {
3592 btrfs_err(BTRFS_I(inode)->root->fs_info,
3593 "page %lu not writeback, cur %llu end %llu",
3594 page->index, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003595 }
David Sterba58409ed2016-05-04 11:46:10 +02003596
David Sterba0ceb34b2020-02-05 19:09:28 +01003597 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
David Sterba6273b7f2017-10-04 17:30:11 +02003598 page, offset, iosize, pg_offset,
David Sterbafa17ed02019-10-03 17:29:05 +02003599 &epd->bio,
David Sterba58409ed2016-05-04 11:46:10 +02003600 end_bio_extent_writepage,
3601 0, 0, 0, false);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003602 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003603 SetPageError(page);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003604 if (PageWriteback(page))
3605 end_page_writeback(page);
3606 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003607
Chris Masond1310b22008-01-24 16:13:08 -05003608 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003609 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003610 nr++;
3611 }
Chris Mason40f76582014-05-21 13:35:51 -07003612 *nr_ret = nr;
Chris Mason40f76582014-05-21 13:35:51 -07003613 return ret;
3614}
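
/*
 * Illustrative sketch (not part of the kernel source): the chunking done by
 * the loop above, assuming a 4096 byte blocksize.  lookup_extent_end(),
 * min() and submit_chunk() stand in for the real helpers:
 *
 *	u64 cur = start;
 *
 *	while (cur <= end) {
 *		u64 em_end = lookup_extent_end(cur);	// hypothetical
 *		u64 iosize = min(em_end - cur, end - cur + 1);
 *
 *		iosize = (iosize + 4095) & ~4095ULL;	// ALIGN to blocksize
 *		submit_chunk(cur, iosize);		// hypothetical
 *		cur += iosize;
 *	}
 */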
3615
3616/*
 3617 * The writepage semantics are similar to regular writepage. Extent
 3618 * records are inserted to lock ranges in the tree, and as dirty areas
 3619 * are found, they are marked for writeback. Then the lock bits are
 3620 * removed and the end_io handler clears the writeback ranges.
Qu Wenruo30659762019-03-20 14:27:42 +08003621 *
3622 * Return 0 if everything goes well.
3623 * Return <0 for error.
Chris Mason40f76582014-05-21 13:35:51 -07003624 */
3625static int __extent_writepage(struct page *page, struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01003626 struct extent_page_data *epd)
Chris Mason40f76582014-05-21 13:35:51 -07003627{
3628 struct inode *inode = page->mapping->host;
Chris Mason40f76582014-05-21 13:35:51 -07003629 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003630 u64 page_end = start + PAGE_SIZE - 1;
Chris Mason40f76582014-05-21 13:35:51 -07003631 int ret;
3632 int nr = 0;
Omar Sandovaleb70d222019-12-02 17:34:20 -08003633 size_t pg_offset;
Chris Mason40f76582014-05-21 13:35:51 -07003634 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003635 unsigned long end_index = i_size >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003636 unsigned long nr_written = 0;
3637
Chris Mason40f76582014-05-21 13:35:51 -07003638 trace___extent_writepage(page, inode, wbc);
3639
3640 WARN_ON(!PageLocked(page));
3641
3642 ClearPageError(page);
3643
Johannes Thumshirn70730172018-12-05 15:23:03 +01003644 pg_offset = offset_in_page(i_size);
Chris Mason40f76582014-05-21 13:35:51 -07003645 if (page->index > end_index ||
3646 (page->index == end_index && !pg_offset)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003647 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
Chris Mason40f76582014-05-21 13:35:51 -07003648 unlock_page(page);
3649 return 0;
3650 }
3651
3652 if (page->index == end_index) {
3653 char *userpage;
3654
3655 userpage = kmap_atomic(page);
3656 memset(userpage + pg_offset, 0,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003657 PAGE_SIZE - pg_offset);
Chris Mason40f76582014-05-21 13:35:51 -07003658 kunmap_atomic(userpage);
3659 flush_dcache_page(page);
3660 }
3661
Chris Mason40f76582014-05-21 13:35:51 -07003662 set_page_extent_mapped(page);
3663
Nikolay Borisov7789a552018-11-08 10:18:06 +02003664 if (!epd->extent_locked) {
Nikolay Borisov8cc02372018-11-08 10:18:07 +02003665 ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
Nikolay Borisov7789a552018-11-08 10:18:06 +02003666 if (ret == 1)
Omar Sandoval169d2c82019-12-02 17:34:21 -08003667 return 0;
Nikolay Borisov7789a552018-11-08 10:18:06 +02003668 if (ret)
3669 goto done;
3670 }
Chris Mason40f76582014-05-21 13:35:51 -07003671
3672 ret = __extent_writepage_io(inode, page, wbc, epd,
David Sterba57e5ffe2019-10-29 18:28:55 +01003673 i_size, nr_written, &nr);
Chris Mason40f76582014-05-21 13:35:51 -07003674 if (ret == 1)
Omar Sandoval169d2c82019-12-02 17:34:21 -08003675 return 0;
Chris Mason40f76582014-05-21 13:35:51 -07003676
3677done:
Chris Masond1310b22008-01-24 16:13:08 -05003678 if (nr == 0) {
3679 /* make sure the mapping tag for page dirty gets cleared */
3680 set_page_writeback(page);
3681 end_page_writeback(page);
3682 }
Filipe Manana61391d52014-05-09 17:17:40 +01003683 if (PageError(page)) {
3684 ret = ret < 0 ? ret : -EIO;
3685 end_extent_writepage(page, ret, start, page_end);
3686 }
Chris Masond1310b22008-01-24 16:13:08 -05003687 unlock_page(page);
Qu Wenruo30659762019-03-20 14:27:42 +08003688 ASSERT(ret <= 0);
Chris Mason40f76582014-05-21 13:35:51 -07003689 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003690}
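
/*
 * Illustrative sketch (not part of the kernel source): how the i_size
 * checks above classify a page, assuming 4K pages.  end_index is the page
 * containing the last byte of the file and pg_offset is how far into that
 * page i_size lands:
 *
 *	loff_t i_size = 6000;
 *	unsigned long end_index = i_size >> 12;		// == 1
 *	size_t pg_offset = i_size & 4095;		// == 1904
 *
 *	// index >= 2 (or index == 1 if pg_offset were 0): page is fully
 *	//	past EOF, so it is invalidated and skipped
 *	// index == 1: straddles EOF, bytes from offset 1904 on are zeroed
 *	// index == 0: fully inside the file, written normally
 */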
3691
Josef Bacikfd8b2b62013-04-24 16:41:19 -04003692void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003693{
NeilBrown74316202014-07-07 15:16:04 +10003694 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3695 TASK_UNINTERRUPTIBLE);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003696}
3697
Filipe Manana18dfa712019-09-11 17:42:00 +01003698static void end_extent_buffer_writeback(struct extent_buffer *eb)
3699{
3700 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3701 smp_mb__after_atomic();
3702 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3703}
3704
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003705/*
 3706 * Lock eb pages and flush the bio if we can't get all the locks
3707 *
3708 * Return 0 if nothing went wrong
 3709 * Return >0 is the same as 0, except the bio is not submitted
3710 * Return <0 if something went wrong, no page is locked
3711 */
David Sterba9df76fb2019-03-20 11:21:41 +01003712static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
Chris Mason0e378df2014-05-19 20:55:27 -07003713 struct extent_page_data *epd)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003714{
David Sterba9df76fb2019-03-20 11:21:41 +01003715 struct btrfs_fs_info *fs_info = eb->fs_info;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003716 int i, num_pages, failed_page_nr;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003717 int flush = 0;
3718 int ret = 0;
3719
3720 if (!btrfs_try_tree_write_lock(eb)) {
Qu Wenruof4340622019-03-20 14:27:41 +08003721 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003722 if (ret < 0)
3723 return ret;
3724 flush = 1;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003725 btrfs_tree_lock(eb);
3726 }
3727
3728 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3729 btrfs_tree_unlock(eb);
3730 if (!epd->sync_io)
3731 return 0;
3732 if (!flush) {
Qu Wenruof4340622019-03-20 14:27:41 +08003733 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003734 if (ret < 0)
3735 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003736 flush = 1;
3737 }
Chris Masona098d8e82012-03-21 12:09:56 -04003738 while (1) {
3739 wait_on_extent_buffer_writeback(eb);
3740 btrfs_tree_lock(eb);
3741 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3742 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003743 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003744 }
3745 }
3746
Josef Bacik51561ff2012-07-20 16:25:24 -04003747 /*
 3748	 * We need to do this to prevent races with anyone who checks whether
 3749	 * the eb is under IO, since we can end up having no IO bits set for a
 3750	 * short period of time.
3751 */
3752 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003753 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3754 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04003755 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003756 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
Nikolay Borisov104b4e52017-06-20 21:01:20 +03003757 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3758 -eb->len,
3759 fs_info->dirty_metadata_batch);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003760 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04003761 } else {
3762 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003763 }
3764
3765 btrfs_tree_unlock(eb);
3766
3767 if (!ret)
3768 return ret;
3769
David Sterba65ad0102018-06-29 10:56:49 +02003770 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003771 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003772 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003773
3774 if (!trylock_page(p)) {
3775 if (!flush) {
Filipe Manana18dfa712019-09-11 17:42:00 +01003776 int err;
3777
3778 err = flush_write_bio(epd);
3779 if (err < 0) {
3780 ret = err;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003781 failed_page_nr = i;
3782 goto err_unlock;
3783 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003784 flush = 1;
3785 }
3786 lock_page(p);
3787 }
3788 }
3789
3790 return ret;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003791err_unlock:
3792 /* Unlock already locked pages */
3793 for (i = 0; i < failed_page_nr; i++)
3794 unlock_page(eb->pages[i]);
Filipe Manana18dfa712019-09-11 17:42:00 +01003795 /*
3796 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
 3797	 * Also set EXTENT_BUFFER_DIRTY back so future write attempts on this
 3798	 * eb can be made, and undo everything done so far.
3799 */
3800 btrfs_tree_lock(eb);
3801 spin_lock(&eb->refs_lock);
3802 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3803 end_extent_buffer_writeback(eb);
3804 spin_unlock(&eb->refs_lock);
3805 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
3806 fs_info->dirty_metadata_batch);
3807 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3808 btrfs_tree_unlock(eb);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003809 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003810}
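
/*
 * Illustrative sketch (not part of the kernel source): the locking pattern
 * used above.  The lock is first tried without blocking; only when that
 * fails is the bio being built flushed (it may contain pages the current
 * lock holder is waiting on) before blocking on the lock:
 *
 *	if (!try_lock(obj)) {		// hypothetical primitives
 *		flush_pending_bio();
 *		lock(obj);		// now safe to block
 *	}
 */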
3811
Filipe Manana656f30d2014-09-26 12:25:56 +01003812static void set_btree_ioerr(struct page *page)
3813{
3814 struct extent_buffer *eb = (struct extent_buffer *)page->private;
Dennis Zhoueb5b64f2019-09-13 14:54:07 +01003815 struct btrfs_fs_info *fs_info;
Filipe Manana656f30d2014-09-26 12:25:56 +01003816
3817 SetPageError(page);
3818 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3819 return;
3820
3821 /*
Dennis Zhoueb5b64f2019-09-13 14:54:07 +01003822 * If we error out, we should add back the dirty_metadata_bytes
3823 * to make it consistent.
3824 */
3825 fs_info = eb->fs_info;
3826 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3827 eb->len, fs_info->dirty_metadata_batch);
3828
3829 /*
Filipe Manana656f30d2014-09-26 12:25:56 +01003830 * If writeback for a btree extent that doesn't belong to a log tree
3831 * failed, increment the counter transaction->eb_write_errors.
3832 * We do this because while the transaction is running and before it's
3833 * committing (when we call filemap_fdata[write|wait]_range against
3834 * the btree inode), we might have
3835 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3836 * returns an error or an error happens during writeback, when we're
3837 * committing the transaction we wouldn't know about it, since the pages
3838 * can be no longer dirty nor marked anymore for writeback (if a
3839 * subsequent modification to the extent buffer didn't happen before the
3840 * transaction commit), which makes filemap_fdata[write|wait]_range not
3841 * able to find the pages tagged with SetPageError at transaction
3842 * commit time. So if this happens we must abort the transaction,
3843 * otherwise we commit a super block with btree roots that point to
3844 * btree nodes/leafs whose content on disk is invalid - either garbage
3845 * or the content of some node/leaf from a past generation that got
3846 * cowed or deleted and is no longer valid.
3847 *
3848 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3849 * not be enough - we need to distinguish between log tree extents vs
3850 * non-log tree extents, and the next filemap_fdatawait_range() call
3851 * will catch and clear such errors in the mapping - and that call might
3852 * be from a log sync and not from a transaction commit. Also, checking
3853 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3854 * not done and would not be reliable - the eb might have been released
3855 * from memory and reading it back again means that flag would not be
3856 * set (since it's a runtime flag, not persisted on disk).
3857 *
3858 * Using the flags below in the btree inode also makes us achieve the
3859 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
3860 * writeback for all dirty pages and before filemap_fdatawait_range()
3861 * is called, the writeback for all dirty pages had already finished
3862 * with errors - because we were not using AS_EIO/AS_ENOSPC,
3863 * filemap_fdatawait_range() would return success, as it could not know
3864 * that writeback errors happened (the pages were no longer tagged for
3865 * writeback).
3866 */
3867 switch (eb->log_index) {
3868 case -1:
Josef Bacikafcdd122016-09-02 15:40:02 -04003869 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003870 break;
3871 case 0:
Josef Bacikafcdd122016-09-02 15:40:02 -04003872 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003873 break;
3874 case 1:
Josef Bacikafcdd122016-09-02 15:40:02 -04003875 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01003876 break;
3877 default:
3878 BUG(); /* unexpected, logic error */
3879 }
3880}
3881
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003882static void end_bio_extent_buffer_writepage(struct bio *bio)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003883{
Kent Overstreet2c30c712013-11-07 12:20:26 -08003884 struct bio_vec *bvec;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003885 struct extent_buffer *eb;
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02003886 int done;
Ming Lei6dc4f102019-02-15 19:13:19 +08003887 struct bvec_iter_all iter_all;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003888
David Sterbac09abff2017-07-13 18:10:07 +02003889 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02003890 bio_for_each_segment_all(bvec, bio, iter_all) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003891 struct page *page = bvec->bv_page;
3892
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003893 eb = (struct extent_buffer *)page->private;
3894 BUG_ON(!eb);
3895 done = atomic_dec_and_test(&eb->io_pages);
3896
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02003897 if (bio->bi_status ||
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003898 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003899 ClearPageUptodate(page);
Filipe Manana656f30d2014-09-26 12:25:56 +01003900 set_btree_ioerr(page);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003901 }
3902
3903 end_page_writeback(page);
3904
3905 if (!done)
3906 continue;
3907
3908 end_extent_buffer_writeback(eb);
Kent Overstreet2c30c712013-11-07 12:20:26 -08003909 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003910
3911 bio_put(bio);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003912}
3913
Chris Mason0e378df2014-05-19 20:55:27 -07003914static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003915 struct writeback_control *wbc,
3916 struct extent_page_data *epd)
3917{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003918 u64 offset = eb->start;
Liu Bo851cd172016-09-23 13:44:44 -07003919 u32 nritems;
David Sterbacc5e31a2018-03-01 18:20:27 +01003920 int i, num_pages;
Liu Bo851cd172016-09-23 13:44:44 -07003921 unsigned long start, end;
Liu Boff40adf2017-08-24 18:19:48 -06003922 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04003923 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003924
Filipe Manana656f30d2014-09-26 12:25:56 +01003925 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02003926 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003927 atomic_set(&eb->io_pages, num_pages);
Josef Bacikde0022b2012-09-25 14:25:58 -04003928
Liu Bo851cd172016-09-23 13:44:44 -07003929 /* set btree blocks beyond nritems with 0 to avoid stale content. */
3930 nritems = btrfs_header_nritems(eb);
Liu Bo3eb548e2016-09-14 17:22:57 -07003931 if (btrfs_header_level(eb) > 0) {
Liu Bo3eb548e2016-09-14 17:22:57 -07003932 end = btrfs_node_key_ptr_offset(nritems);
3933
David Sterbab159fa22016-11-08 18:09:03 +01003934 memzero_extent_buffer(eb, end, eb->len - end);
Liu Bo851cd172016-09-23 13:44:44 -07003935 } else {
3936 /*
3937 * leaf:
3938 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3939 */
3940 start = btrfs_item_nr_offset(nritems);
David Sterba8f881e82019-03-20 11:33:10 +01003941 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
David Sterbab159fa22016-11-08 18:09:03 +01003942 memzero_extent_buffer(eb, start, end - start);
Liu Bo3eb548e2016-09-14 17:22:57 -07003943 }
3944
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003945 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003946 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003947
3948 clear_page_dirty_for_io(p);
3949 set_page_writeback(p);
David Sterba0ceb34b2020-02-05 19:09:28 +01003950 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
David Sterbafa17ed02019-10-03 17:29:05 +02003951 p, offset, PAGE_SIZE, 0,
David Sterbac2df8bb2017-02-10 19:29:38 +01003952 &epd->bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05003953 end_bio_extent_buffer_writepage,
Liu Bo18fdc672017-09-13 12:18:22 -06003954 0, 0, 0, false);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003955 if (ret) {
Filipe Manana656f30d2014-09-26 12:25:56 +01003956 set_btree_ioerr(p);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003957 if (PageWriteback(p))
3958 end_page_writeback(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003959 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3960 end_extent_buffer_writeback(eb);
3961 ret = -EIO;
3962 break;
3963 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003964 offset += PAGE_SIZE;
David Sterba3d4b9492017-02-10 19:33:41 +01003965 update_nr_written(wbc, 1);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003966 unlock_page(p);
3967 }
3968
3969 if (unlikely(ret)) {
3970 for (; i < num_pages; i++) {
Chris Masonbbf65cf2014-10-04 09:56:45 -07003971 struct page *p = eb->pages[i];
Liu Bo81465022014-09-23 22:22:33 +08003972 clear_page_dirty_for_io(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003973 unlock_page(p);
3974 }
3975 }
3976
3977 return ret;
3978}
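
/*
 * Illustrative sketch (not part of the kernel source): the stale-data
 * scrubbing done above for an interior node, with made-up names.  Only the
 * header plus nritems key pointers are meaningful; the rest of the block is
 * zeroed so old content never reaches disk:
 *
 *	size_t used = header_size + nritems * key_ptr_size;	// assumption
 *
 *	memset(buf + used, 0, block_size - used);
 *
 * Leaves need the slightly more involved variant above, because item
 * headers grow from the front and item data grows from the back, so the gap
 * in the middle is what gets zeroed.
 */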
3979
3980int btree_write_cache_pages(struct address_space *mapping,
3981 struct writeback_control *wbc)
3982{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003983 struct extent_buffer *eb, *prev_eb = NULL;
3984 struct extent_page_data epd = {
3985 .bio = NULL,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003986 .extent_locked = 0,
3987 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3988 };
Qu Wenruob3ff8f12020-02-12 14:12:44 +08003989 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003990 int ret = 0;
3991 int done = 0;
3992 int nr_to_write_done = 0;
3993 struct pagevec pvec;
3994 int nr_pages;
3995 pgoff_t index;
3996 pgoff_t end; /* Inclusive */
3997 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05003998 xa_mark_t tag;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003999
Mel Gorman86679822017-11-15 17:37:52 -08004000 pagevec_init(&pvec);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004001 if (wbc->range_cyclic) {
4002 index = mapping->writeback_index; /* Start from prev offset */
4003 end = -1;
Josef Bacik556755a2020-01-03 10:38:44 -05004004 /*
4005 * Start from the beginning does not need to cycle over the
4006 * range, mark it as scanned.
4007 */
4008 scanned = (index == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004009 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004010 index = wbc->range_start >> PAGE_SHIFT;
4011 end = wbc->range_end >> PAGE_SHIFT;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004012 scanned = 1;
4013 }
4014 if (wbc->sync_mode == WB_SYNC_ALL)
4015 tag = PAGECACHE_TAG_TOWRITE;
4016 else
4017 tag = PAGECACHE_TAG_DIRTY;
4018retry:
4019 if (wbc->sync_mode == WB_SYNC_ALL)
4020 tag_pages_for_writeback(mapping, index, end);
4021 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara4006f432017-11-15 17:34:37 -08004022 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
Jan Kara67fd7072017-11-15 17:35:19 -08004023 tag))) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004024 unsigned i;
4025
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004026 for (i = 0; i < nr_pages; i++) {
4027 struct page *page = pvec.pages[i];
4028
4029 if (!PagePrivate(page))
4030 continue;
4031
Josef Bacikb5bae262012-09-14 13:43:01 -04004032 spin_lock(&mapping->private_lock);
4033 if (!PagePrivate(page)) {
4034 spin_unlock(&mapping->private_lock);
4035 continue;
4036 }
4037
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004038 eb = (struct extent_buffer *)page->private;
Josef Bacikb5bae262012-09-14 13:43:01 -04004039
4040 /*
4041 * Shouldn't happen and normally this would be a BUG_ON
 4042			 * but no sense in crashing the user's box for something
4043 * we can survive anyway.
4044 */
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05304045 if (WARN_ON(!eb)) {
Josef Bacikb5bae262012-09-14 13:43:01 -04004046 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004047 continue;
4048 }
4049
Josef Bacikb5bae262012-09-14 13:43:01 -04004050 if (eb == prev_eb) {
4051 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004052 continue;
4053 }
4054
Josef Bacikb5bae262012-09-14 13:43:01 -04004055 ret = atomic_inc_not_zero(&eb->refs);
4056 spin_unlock(&mapping->private_lock);
4057 if (!ret)
4058 continue;
4059
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004060 prev_eb = eb;
David Sterba9df76fb2019-03-20 11:21:41 +01004061 ret = lock_extent_buffer_for_io(eb, &epd);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004062 if (!ret) {
4063 free_extent_buffer(eb);
4064 continue;
Filipe Manana0607eb1d2019-09-11 17:42:28 +01004065 } else if (ret < 0) {
4066 done = 1;
4067 free_extent_buffer(eb);
4068 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004069 }
4070
David Sterba0ab02062019-03-20 11:27:57 +01004071 ret = write_one_eb(eb, wbc, &epd);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004072 if (ret) {
4073 done = 1;
4074 free_extent_buffer(eb);
4075 break;
4076 }
4077 free_extent_buffer(eb);
4078
4079 /*
4080 * the filesystem may choose to bump up nr_to_write.
4081 * We have to make sure to honor the new nr_to_write
4082 * at any time
4083 */
4084 nr_to_write_done = wbc->nr_to_write <= 0;
4085 }
4086 pagevec_release(&pvec);
4087 cond_resched();
4088 }
4089 if (!scanned && !done) {
4090 /*
4091 * We hit the last page and there is more work to be done: wrap
4092 * back to the start of the file
4093 */
4094 scanned = 1;
4095 index = 0;
4096 goto retry;
4097 }
Qu Wenruo2b952ee2019-03-20 14:27:43 +08004098 ASSERT(ret <= 0);
4099 if (ret < 0) {
4100 end_write_bio(&epd, ret);
4101 return ret;
4102 }
Qu Wenruob3ff8f12020-02-12 14:12:44 +08004103 /*
4104 * If something went wrong, don't allow any metadata write bio to be
4105 * submitted.
4106 *
4107 * This would prevent use-after-free if we had dirty pages not
4108 * cleaned up, which can still happen by fuzzed images.
4109 *
4110 * - Bad extent tree
4111 * Allowing existing tree block to be allocated for other trees.
4112 *
4113 * - Log tree operations
 4114	 *   Existing tree blocks get allocated to the log tree, which bumps
 4115	 *   their generation, then get cleaned in tree re-balance.
 4116	 *   Such a tree block will not be written back, since it's clean,
 4117	 *   thus no WRITTEN flag is set.
 4118	 *   And after the log writes back, this tree block is not tracked by
 4119	 *   any dirty extent_io_tree.
4120 *
4121 * - Offending tree block gets re-dirtied from its original owner
 4122	 *   Since it has a bumped generation and no WRITTEN flag, it can be
 4123	 *   reused without COWing. This tree block will not be tracked by
 4124	 *   btrfs_transaction::dirty_pages.
4125 *
4126 * Now such dirty tree block will not be cleaned by any dirty
4127 * extent io tree. Thus we don't want to submit such wild eb
4128 * if the fs already has error.
4129 */
4130 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
4131 ret = flush_write_bio(&epd);
4132 } else {
4133 ret = -EUCLEAN;
4134 end_write_bio(&epd, ret);
4135 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004136 return ret;
4137}
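
/*
 * Illustrative sketch (not part of the kernel source): the range_cyclic
 * scan used above and in extent_write_cache_pages() below.  The scan starts
 * at the remembered writeback index and, if it reaches the end with work
 * left, wraps around once to pick up the pages it skipped:
 *
 *	pgoff_t index = writeback_index;	// hypothetical state
 *	int scanned = (index == 0);
 *
 *again:
 *	while (!done && index <= end)
 *		write_dirty_pages(&index);	// hypothetical helper
 *	if (!scanned && !done) {
 *		scanned = 1;
 *		index = 0;
 *		goto again;
 *	}
 */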
4138
Chris Masond1310b22008-01-24 16:13:08 -05004139/**
Chris Mason4bef0842008-09-08 11:18:08 -04004140 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05004141 * @mapping: address space structure to write
4142 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
David Sterba935db852017-06-23 04:30:28 +02004143 * @epd: extent_page_data passed to the __extent_writepage function
Chris Masond1310b22008-01-24 16:13:08 -05004144 *
4145 * If a page is already under I/O, write_cache_pages() skips it, even
4146 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4147 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4148 * and msync() need to guarantee that all the data which was dirty at the time
4149 * the call was made get new I/O started against them. If wbc->sync_mode is
4150 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4151 * existing IO to complete.
4152 */
David Sterba4242b642017-02-10 19:38:24 +01004153static int extent_write_cache_pages(struct address_space *mapping,
Chris Mason4bef0842008-09-08 11:18:08 -04004154 struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01004155 struct extent_page_data *epd)
Chris Masond1310b22008-01-24 16:13:08 -05004156{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004157 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05004158 int ret = 0;
4159 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004160 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004161 struct pagevec pvec;
4162 int nr_pages;
4163 pgoff_t index;
4164 pgoff_t end; /* Inclusive */
Liu Boa91326672016-03-07 16:56:21 -08004165 pgoff_t done_index;
4166 int range_whole = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004167 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05004168 xa_mark_t tag;
Chris Masond1310b22008-01-24 16:13:08 -05004169
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004170 /*
4171 * We have to hold onto the inode so that ordered extents can do their
4172 * work when the IO finishes. The alternative to this is failing to add
4173 * an ordered extent if the igrab() fails there and that is a huge pain
4174 * to deal with, so instead just hold onto the inode throughout the
4175 * writepages operation. If it fails here we are freeing up the inode
4176 * anyway and we'd rather not waste our time writing out stuff that is
4177 * going to be truncated anyway.
4178 */
4179 if (!igrab(inode))
4180 return 0;
4181
Mel Gorman86679822017-11-15 17:37:52 -08004182 pagevec_init(&pvec);
Chris Masond1310b22008-01-24 16:13:08 -05004183 if (wbc->range_cyclic) {
4184 index = mapping->writeback_index; /* Start from prev offset */
4185 end = -1;
Josef Bacik556755a2020-01-03 10:38:44 -05004186 /*
4187 * Start from the beginning does not need to cycle over the
4188 * range, mark it as scanned.
4189 */
4190 scanned = (index == 0);
Chris Masond1310b22008-01-24 16:13:08 -05004191 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004192 index = wbc->range_start >> PAGE_SHIFT;
4193 end = wbc->range_end >> PAGE_SHIFT;
Liu Boa91326672016-03-07 16:56:21 -08004194 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4195 range_whole = 1;
Chris Masond1310b22008-01-24 16:13:08 -05004196 scanned = 1;
4197 }
Ethan Lien3cd24c62018-11-01 14:49:03 +08004198
4199 /*
4200 * We do the tagged writepage as long as the snapshot flush bit is set
 4201	 * and we are the first one to do the filemap_flush() on this inode.
4202 *
4203 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4204 * not race in and drop the bit.
4205 */
4206 if (range_whole && wbc->nr_to_write == LONG_MAX &&
4207 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4208 &BTRFS_I(inode)->runtime_flags))
4209 wbc->tagged_writepages = 1;
4210
4211 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004212 tag = PAGECACHE_TAG_TOWRITE;
4213 else
4214 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05004215retry:
Ethan Lien3cd24c62018-11-01 14:49:03 +08004216 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004217 tag_pages_for_writeback(mapping, index, end);
Liu Boa91326672016-03-07 16:56:21 -08004218 done_index = index;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004219 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara67fd7072017-11-15 17:35:19 -08004220 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4221 &index, end, tag))) {
Chris Masond1310b22008-01-24 16:13:08 -05004222 unsigned i;
4223
Chris Masond1310b22008-01-24 16:13:08 -05004224 for (i = 0; i < nr_pages; i++) {
4225 struct page *page = pvec.pages[i];
4226
Tejun Heof7bddf12019-10-03 07:27:13 -07004227 done_index = page->index + 1;
Chris Masond1310b22008-01-24 16:13:08 -05004228 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07004229 * At this point we hold neither the i_pages lock nor
4230 * the page lock: the page may be truncated or
4231 * invalidated (changing page->mapping to NULL),
4232 * or even swizzled back from swapper_space to
4233 * tmpfs file mapping
Chris Masond1310b22008-01-24 16:13:08 -05004234 */
Josef Bacikc8f2f242013-02-11 11:33:00 -05004235 if (!trylock_page(page)) {
Qu Wenruof4340622019-03-20 14:27:41 +08004236 ret = flush_write_bio(epd);
4237 BUG_ON(ret < 0);
Josef Bacikc8f2f242013-02-11 11:33:00 -05004238 lock_page(page);
Chris Mason01d658f2011-11-01 10:08:06 -04004239 }
Chris Masond1310b22008-01-24 16:13:08 -05004240
4241 if (unlikely(page->mapping != mapping)) {
4242 unlock_page(page);
4243 continue;
4244 }
4245
Chris Masond2c3f4f2008-11-19 12:44:22 -05004246 if (wbc->sync_mode != WB_SYNC_NONE) {
Qu Wenruof4340622019-03-20 14:27:41 +08004247 if (PageWriteback(page)) {
4248 ret = flush_write_bio(epd);
4249 BUG_ON(ret < 0);
4250 }
Chris Masond1310b22008-01-24 16:13:08 -05004251 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05004252 }
Chris Masond1310b22008-01-24 16:13:08 -05004253
4254 if (PageWriteback(page) ||
4255 !clear_page_dirty_for_io(page)) {
4256 unlock_page(page);
4257 continue;
4258 }
4259
David Sterbaaab6e9e2017-11-30 18:00:02 +01004260 ret = __extent_writepage(page, wbc, epd);
Liu Boa91326672016-03-07 16:56:21 -08004261 if (ret < 0) {
Liu Boa91326672016-03-07 16:56:21 -08004262 done = 1;
4263 break;
4264 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004265
4266 /*
4267 * the filesystem may choose to bump up nr_to_write.
4268 * We have to make sure to honor the new nr_to_write
4269 * at any time
4270 */
4271 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05004272 }
4273 pagevec_release(&pvec);
4274 cond_resched();
4275 }
Liu Bo894b36e2016-03-07 16:56:22 -08004276 if (!scanned && !done) {
Chris Masond1310b22008-01-24 16:13:08 -05004277 /*
4278 * We hit the last page and there is more work to be done: wrap
4279 * back to the start of the file
4280 */
4281 scanned = 1;
4282 index = 0;
Josef Bacik42ffb0b2020-01-23 15:33:02 -05004283
4284 /*
4285 * If we're looping we could run into a page that is locked by a
4286 * writer and that writer could be waiting on writeback for a
4287 * page in our current bio, and thus deadlock, so flush the
4288 * write bio here.
4289 */
4290 ret = flush_write_bio(epd);
4291 if (!ret)
4292 goto retry;
Chris Masond1310b22008-01-24 16:13:08 -05004293 }
Liu Boa91326672016-03-07 16:56:21 -08004294
4295 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4296 mapping->writeback_index = done_index;
4297
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004298 btrfs_add_delayed_iput(inode);
Liu Bo894b36e2016-03-07 16:56:22 -08004299 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004300}
Chris Masond1310b22008-01-24 16:13:08 -05004301
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +02004302int extent_write_full_page(struct page *page, struct writeback_control *wbc)
Chris Masond1310b22008-01-24 16:13:08 -05004303{
4304 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004305 struct extent_page_data epd = {
4306 .bio = NULL,
Chris Mason771ed682008-11-06 22:02:51 -05004307 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004308 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004309 };
Chris Masond1310b22008-01-24 16:13:08 -05004310
Chris Masond1310b22008-01-24 16:13:08 -05004311 ret = __extent_writepage(page, wbc, &epd);
Qu Wenruo30659762019-03-20 14:27:42 +08004312 ASSERT(ret <= 0);
4313 if (ret < 0) {
4314 end_write_bio(&epd, ret);
4315 return ret;
4316 }
Chris Masond1310b22008-01-24 16:13:08 -05004317
Qu Wenruo30659762019-03-20 14:27:42 +08004318 ret = flush_write_bio(&epd);
4319 ASSERT(ret <= 0);
Chris Masond1310b22008-01-24 16:13:08 -05004320 return ret;
4321}
Chris Masond1310b22008-01-24 16:13:08 -05004322
Nikolay Borisov5e3ee232017-12-08 15:55:58 +02004323int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
Chris Mason771ed682008-11-06 22:02:51 -05004324 int mode)
4325{
4326 int ret = 0;
4327 struct address_space *mapping = inode->i_mapping;
4328 struct page *page;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004329 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4330 PAGE_SHIFT;
Chris Mason771ed682008-11-06 22:02:51 -05004331
4332 struct extent_page_data epd = {
4333 .bio = NULL,
Chris Mason771ed682008-11-06 22:02:51 -05004334 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04004335 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05004336 };
4337 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05004338 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05004339 .nr_to_write = nr_pages * 2,
4340 .range_start = start,
4341 .range_end = end + 1,
Chris Masonec39f762019-07-10 12:28:17 -07004342 /* We're called from an async helper function */
4343 .punt_to_cgroup = 1,
4344 .no_cgroup_owner = 1,
Chris Mason771ed682008-11-06 22:02:51 -05004345 };
4346
Chris Masondbb70be2019-07-10 12:28:18 -07004347 wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
Chris Masond3977122009-01-05 21:25:51 -05004348 while (start <= end) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004349 page = find_get_page(mapping, start >> PAGE_SHIFT);
Chris Mason771ed682008-11-06 22:02:51 -05004350 if (clear_page_dirty_for_io(page))
4351 ret = __extent_writepage(page, &wbc_writepages, &epd);
4352 else {
Nikolay Borisov7087a9d2018-11-01 14:09:48 +02004353 btrfs_writepage_endio_finish_ordered(page, start,
Nikolay Borisovc6297322018-11-08 10:18:08 +02004354 start + PAGE_SIZE - 1, 1);
Chris Mason771ed682008-11-06 22:02:51 -05004355 unlock_page(page);
4356 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004357 put_page(page);
4358 start += PAGE_SIZE;
Chris Mason771ed682008-11-06 22:02:51 -05004359 }
4360
Qu Wenruo02c6db42019-03-20 14:27:45 +08004361 ASSERT(ret <= 0);
Chris Masondbb70be2019-07-10 12:28:18 -07004362 if (ret == 0)
4363 ret = flush_write_bio(&epd);
4364 else
Qu Wenruo02c6db42019-03-20 14:27:45 +08004365 end_write_bio(&epd, ret);
Chris Masondbb70be2019-07-10 12:28:18 -07004366
4367 wbc_detach_inode(&wbc_writepages);
Chris Mason771ed682008-11-06 22:02:51 -05004368 return ret;
4369}
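
/*
 * Illustrative sketch (not part of the kernel source): the page walk above,
 * assuming 4K pages and a page-aligned start.  start and end are byte
 * offsets with end inclusive, so the loop visits every page intersecting
 * the range:
 *
 *	unsigned long nr_pages = (end - start + 4096) >> 12;
 *
 *	for (u64 off = start; off <= end; off += 4096)
 *		write_or_finish_page(off >> 12);	// hypothetical
 */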
Chris Masond1310b22008-01-24 16:13:08 -05004370
Nikolay Borisov8ae225a2018-04-19 10:46:38 +03004371int extent_writepages(struct address_space *mapping,
Chris Masond1310b22008-01-24 16:13:08 -05004372 struct writeback_control *wbc)
4373{
4374 int ret = 0;
4375 struct extent_page_data epd = {
4376 .bio = NULL,
Chris Mason771ed682008-11-06 22:02:51 -05004377 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004378 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004379 };
4380
David Sterba935db852017-06-23 04:30:28 +02004381 ret = extent_write_cache_pages(mapping, wbc, &epd);
Qu Wenruoa2a72fb2019-03-20 14:27:48 +08004382 ASSERT(ret <= 0);
4383 if (ret < 0) {
4384 end_write_bio(&epd, ret);
4385 return ret;
4386 }
4387 ret = flush_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004388 return ret;
4389}
Chris Masond1310b22008-01-24 16:13:08 -05004390
Nikolay Borisov2a3ff0a2018-04-19 10:46:36 +03004391int extent_readpages(struct address_space *mapping, struct list_head *pages,
4392 unsigned nr_pages)
Chris Masond1310b22008-01-24 16:13:08 -05004393{
4394 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04004395 unsigned long bio_flags = 0;
Liu Bo67c96842012-07-20 21:43:09 -06004396 struct page *pagepool[16];
Miao Xie125bac012013-07-25 19:22:37 +08004397 struct extent_map *em_cached = NULL;
Liu Bo67c96842012-07-20 21:43:09 -06004398 int nr = 0;
Filipe Manana808f80b2015-09-28 09:56:26 +01004399 u64 prev_em_start = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05004400
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004401 while (!list_empty(pages)) {
Nikolay Borisove65ef212019-03-11 09:55:38 +02004402 u64 contig_end = 0;
4403
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004404 for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004405 struct page *page = lru_to_page(pages);
Chris Masond1310b22008-01-24 16:13:08 -05004406
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004407 prefetchw(&page->flags);
4408 list_del(&page->lru);
4409 if (add_to_page_cache_lru(page, mapping, page->index,
4410 readahead_gfp_mask(mapping))) {
4411 put_page(page);
Nikolay Borisove65ef212019-03-11 09:55:38 +02004412 break;
Nikolay Borisov61ed3a12018-11-29 18:41:31 +02004413 }
4414
4415 pagepool[nr++] = page;
Nikolay Borisove65ef212019-03-11 09:55:38 +02004416 contig_end = page_offset(page) + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05004417 }
Liu Bo67c96842012-07-20 21:43:09 -06004418
Nikolay Borisove65ef212019-03-11 09:55:38 +02004419 if (nr) {
4420 u64 contig_start = page_offset(pagepool[0]);
4421
4422 ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
4423
David Sterbab6660e82020-02-05 19:09:40 +01004424 contiguous_readpages(pagepool, nr, contig_start,
Nikolay Borisove65ef212019-03-11 09:55:38 +02004425 contig_end, &em_cached, &bio, &bio_flags,
4426 &prev_em_start);
4427 }
Chris Masond1310b22008-01-24 16:13:08 -05004428 }
Liu Bo67c96842012-07-20 21:43:09 -06004429
Miao Xie125bac012013-07-25 19:22:37 +08004430 if (em_cached)
4431 free_extent_map(em_cached);
4432
Chris Masond1310b22008-01-24 16:13:08 -05004433 if (bio)
Mike Christie1f7ad752016-06-05 14:31:51 -05004434 return submit_one_bio(bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05004435 return 0;
4436}
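
/*
 * Illustrative sketch (not part of the kernel source): the batching above,
 * assuming 4K pages.  Up to 16 pages are pulled off the readahead list and
 * submitted as one contiguous byte range, which is what the ASSERT on
 * contig_start/contig_end checks:
 *
 *	u64 contig_start = 0;		// byte offset of page index 0
 *	int nr = 3;			// pages 0, 1 and 2 batched
 *	u64 contig_end = contig_start + nr * 4096 - 1;	// == 12287
 */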
Chris Masond1310b22008-01-24 16:13:08 -05004437
4438/*
4439 * basic invalidatepage code, this waits on any locked or writeback
4440 * ranges corresponding to the page, and then deletes any extent state
4441 * records from the tree
4442 */
4443int extent_invalidatepage(struct extent_io_tree *tree,
4444 struct page *page, unsigned long offset)
4445{
Josef Bacik2ac55d42010-02-03 19:33:23 +00004446 struct extent_state *cached_state = NULL;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004447 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004448 u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05004449 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4450
Qu Wenruofda28322013-02-26 08:10:22 +00004451 start += ALIGN(offset, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05004452 if (start > end)
4453 return 0;
4454
David Sterbaff13db42015-12-03 14:30:40 +01004455 lock_extent_bits(tree, start, end, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04004456 wait_on_page_writeback(page);
Omar Sandovale1821632019-08-15 14:04:04 -07004457 clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
4458 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05004459 return 0;
4460}
Chris Masond1310b22008-01-24 16:13:08 -05004461
4462/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04004463 * a helper for releasepage, this tests for areas of the page that
4464 * are locked or under IO and drops the related state bits if it is safe
4465 * to drop the page.
4466 */
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03004467static int try_release_extent_state(struct extent_io_tree *tree,
Eric Sandeen48a3b632013-04-25 20:41:01 +00004468 struct page *page, gfp_t mask)
Chris Mason7b13b7b2008-04-18 10:29:50 -04004469{
Miao Xie4eee4fa2012-12-21 09:17:45 +00004470 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004471 u64 end = start + PAGE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004472 int ret = 1;
4473
Nikolay Borisov88826792019-03-14 15:28:31 +02004474 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
Chris Mason7b13b7b2008-04-18 10:29:50 -04004475 ret = 0;
Nikolay Borisov88826792019-03-14 15:28:31 +02004476 } else {
Chris Mason11ef1602009-09-23 20:28:46 -04004477 /*
4478 * at this point we can safely clear everything except the
4479 * locked bit and the nodatasum bit
4480 */
David Sterba66b0c882017-10-31 16:30:47 +01004481 ret = __clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04004482 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
David Sterba66b0c882017-10-31 16:30:47 +01004483 0, 0, NULL, mask, NULL);
Chris Masone3f24cc2011-02-14 12:52:08 -05004484
 4485		/* if clear_extent_bit() failed for ENOMEM reasons,
 4486		 * we can't allow the release to continue.
4487 */
4488 if (ret < 0)
4489 ret = 0;
4490 else
4491 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004492 }
4493 return ret;
4494}
Chris Mason7b13b7b2008-04-18 10:29:50 -04004495
4496/*
Chris Masond1310b22008-01-24 16:13:08 -05004497 * a helper for releasepage. As long as there are no locked extents
4498 * in the range corresponding to the page, both state records and extent
4499 * map records are removed
4500 */
Nikolay Borisov477a30b2018-04-19 10:46:34 +03004501int try_release_extent_mapping(struct page *page, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05004502{
4503 struct extent_map *em;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004504 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004505 u64 end = start + PAGE_SIZE - 1;
Filipe Mananabd3599a2018-07-12 01:36:43 +01004506 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4507 struct extent_io_tree *tree = &btrfs_inode->io_tree;
4508 struct extent_map_tree *map = &btrfs_inode->extent_tree;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004509
Mel Gormand0164ad2015-11-06 16:28:21 -08004510 if (gfpflags_allow_blocking(mask) &&
Byongho Leeee221842015-12-15 01:42:10 +09004511 page->mapping->host->i_size > SZ_16M) {
Yan39b56372008-02-15 10:40:50 -05004512 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05004513 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05004514 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04004515 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05004516 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09004517 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04004518 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004519 break;
4520 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04004521 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4522 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04004523 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004524 free_extent_map(em);
4525 break;
4526 }
4527 if (!test_range_bit(tree, em->start,
4528 extent_map_end(em) - 1,
Nikolay Borisov4e586ca2019-03-14 15:28:30 +02004529 EXTENT_LOCKED, 0, NULL)) {
Filipe Mananabd3599a2018-07-12 01:36:43 +01004530 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4531 &btrfs_inode->runtime_flags);
Chris Mason70dec802008-01-29 09:59:12 -05004532 remove_extent_mapping(map, em);
4533 /* once for the rb tree */
4534 free_extent_map(em);
4535 }
4536 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04004537 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004538
4539 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05004540 free_extent_map(em);
4541 }
Chris Masond1310b22008-01-24 16:13:08 -05004542 }
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03004543 return try_release_extent_state(tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05004544}
Chris Masond1310b22008-01-24 16:13:08 -05004545
Chris Masonec29ed52011-02-23 16:23:20 -05004546/*
4547 * helper function for fiemap, which doesn't want to see any holes.
4548 * This maps until we find something past 'last'
4549 */
4550static struct extent_map *get_extent_skip_holes(struct inode *inode,
David Sterbae3350e12017-06-23 04:09:57 +02004551 u64 offset, u64 last)
Chris Masonec29ed52011-02-23 16:23:20 -05004552{
Jeff Mahoneyda170662016-06-15 09:22:56 -04004553 u64 sectorsize = btrfs_inode_sectorsize(inode);
Chris Masonec29ed52011-02-23 16:23:20 -05004554 struct extent_map *em;
4555 u64 len;
4556
4557 if (offset >= last)
4558 return NULL;
4559
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05304560 while (1) {
Chris Masonec29ed52011-02-23 16:23:20 -05004561 len = last - offset;
4562 if (len == 0)
4563 break;
Qu Wenruofda28322013-02-26 08:10:22 +00004564 len = ALIGN(len, sectorsize);
Nikolay Borisov4ab47a82018-12-12 09:42:32 +02004565 em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
David Sterbac7040052011-04-19 18:00:01 +02004566 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05004567 return em;
4568
4569 /* if this isn't a hole return it */
Nikolay Borisov4a2d25c2017-11-23 10:51:43 +02004570 if (em->block_start != EXTENT_MAP_HOLE)
Chris Masonec29ed52011-02-23 16:23:20 -05004571 return em;
Chris Masonec29ed52011-02-23 16:23:20 -05004572
4573 /* this is a hole, advance to the next extent */
4574 offset = extent_map_end(em);
4575 free_extent_map(em);
4576 if (offset >= last)
4577 break;
4578 }
4579 return NULL;
4580}
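
/*
 * Illustrative sketch (not part of the kernel source): the hole-skipping
 * loop above, with made-up helpers.  Each lookup either yields a real
 * extent, which is returned, or a hole, in which case the search resumes
 * right after it:
 *
 *	while (offset < last) {
 *		em = lookup_mapping(offset, last - offset);
 *		if (!is_hole(em))
 *			return em;
 *		offset = extent_end(em);
 *		release(em);
 *	}
 *	return NULL;
 */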
4581
Qu Wenruo47518322017-04-07 10:43:15 +08004582/*
4583 * To cache previous fiemap extent
4584 *
4585 * Will be used for merging fiemap extent
4586 */
4587struct fiemap_cache {
4588 u64 offset;
4589 u64 phys;
4590 u64 len;
4591 u32 flags;
4592 bool cached;
4593};
4594
4595/*
4596 * Helper to submit fiemap extent.
4597 *
4598 * Will try to merge current fiemap extent specified by @offset, @phys,
 4599 * Only when the merge fails is the cached one submitted as a
 4600 * fiemap extent.
4601 * fiemap extent.
4602 *
4603 * Return value is the same as fiemap_fill_next_extent().
4604 */
4605static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4606 struct fiemap_cache *cache,
4607 u64 offset, u64 phys, u64 len, u32 flags)
4608{
4609 int ret = 0;
4610
4611 if (!cache->cached)
4612 goto assign;
4613
4614 /*
4615 * Sanity check, extent_fiemap() should have ensured that new
Andrea Gelmini52042d82018-11-28 12:05:13 +01004616 * fiemap extent won't overlap with cached one.
Qu Wenruo47518322017-04-07 10:43:15 +08004617 * Not recoverable.
4618 *
4619 * NOTE: Physical address can overlap, due to compression
4620 */
4621 if (cache->offset + cache->len > offset) {
4622 WARN_ON(1);
4623 return -EINVAL;
4624 }
4625
4626 /*
4627 * Only merges fiemap extents if
4628 * 1) Their logical addresses are continuous
4629 *
4630 * 2) Their physical addresses are continuous
4631 * So truly compressed (physical size smaller than logical size)
4632 * extents won't get merged with each other
4633 *
4634 * 3) Share same flags except FIEMAP_EXTENT_LAST
4635 * So regular extent won't get merged with prealloc extent
4636 */
4637 if (cache->offset + cache->len == offset &&
4638 cache->phys + cache->len == phys &&
4639 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4640 (flags & ~FIEMAP_EXTENT_LAST)) {
4641 cache->len += len;
4642 cache->flags |= flags;
4643 goto try_submit_last;
4644 }
4645
4646 /* Not mergeable, need to submit cached one */
4647 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4648 cache->len, cache->flags);
4649 cache->cached = false;
4650 if (ret)
4651 return ret;
4652assign:
4653 cache->cached = true;
4654 cache->offset = offset;
4655 cache->phys = phys;
4656 cache->len = len;
4657 cache->flags = flags;
4658try_submit_last:
4659 if (cache->flags & FIEMAP_EXTENT_LAST) {
4660 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4661 cache->phys, cache->len, cache->flags);
4662 cache->cached = false;
4663 }
4664 return ret;
4665}
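
/*
 * Illustrative sketch (not part of the kernel source): the merge test used
 * above, written as a standalone predicate.  Two fiemap extents are merged
 * only when they are logically contiguous, physically contiguous and share
 * the same flags apart from FIEMAP_EXTENT_LAST:
 *
 *	static bool can_merge(const struct fiemap_cache *c,
 *			      u64 offset, u64 phys, u32 flags)
 *	{
 *		return c->offset + c->len == offset &&
 *		       c->phys + c->len == phys &&
 *		       (c->flags & ~FIEMAP_EXTENT_LAST) ==
 *		       (flags & ~FIEMAP_EXTENT_LAST);
 *	}
 */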
4666
4667/*
Qu Wenruo848c23b2017-06-22 10:01:21 +08004668 * Emit last fiemap cache
Qu Wenruo47518322017-04-07 10:43:15 +08004669 *
Qu Wenruo848c23b2017-06-22 10:01:21 +08004670 * The last fiemap cache may still be cached in the following case:
4671 * 0 4k 8k
4672 * |<- Fiemap range ->|
4673 * |<------------ First extent ----------->|
4674 *
4675 * In this case, the first extent range will be cached but not emitted.
4676 * So we must emit it before ending extent_fiemap().
Qu Wenruo47518322017-04-07 10:43:15 +08004677 */
David Sterba5c5aff92019-03-20 11:29:46 +01004678static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
Qu Wenruo848c23b2017-06-22 10:01:21 +08004679 struct fiemap_cache *cache)
Qu Wenruo47518322017-04-07 10:43:15 +08004680{
4681 int ret;
4682
4683 if (!cache->cached)
4684 return 0;
4685
Qu Wenruo47518322017-04-07 10:43:15 +08004686 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4687 cache->len, cache->flags);
4688 cache->cached = false;
4689 if (ret > 0)
4690 ret = 0;
4691 return ret;
4692}
4693
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004694int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
David Sterba2135fb92017-06-23 04:09:57 +02004695 __u64 start, __u64 len)
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004696{
Josef Bacik975f84f2010-11-23 19:36:57 +00004697 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004698 u64 off = start;
4699 u64 max = start + len;
4700 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004701 u32 found_type;
4702 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05004703 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004704 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004705 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00004706 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004707 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00004708 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00004709 struct btrfs_path *path;
Josef Bacikdc046b12014-09-10 16:20:45 -04004710 struct btrfs_root *root = BTRFS_I(inode)->root;
Qu Wenruo47518322017-04-07 10:43:15 +08004711 struct fiemap_cache cache = { 0 };
David Sterba5911c8f2019-05-15 15:31:04 +02004712 struct ulist *roots;
4713 struct ulist *tmp_ulist;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004714 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004715 u64 em_start = 0;
4716 u64 em_len = 0;
4717 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004718
4719 if (len == 0)
4720 return -EINVAL;
4721
Josef Bacik975f84f2010-11-23 19:36:57 +00004722 path = btrfs_alloc_path();
4723 if (!path)
4724 return -ENOMEM;
4725 path->leave_spinning = 1;
4726
David Sterba5911c8f2019-05-15 15:31:04 +02004727 roots = ulist_alloc(GFP_KERNEL);
4728 tmp_ulist = ulist_alloc(GFP_KERNEL);
4729 if (!roots || !tmp_ulist) {
4730 ret = -ENOMEM;
4731 goto out_free_ulist;
4732 }
4733
Jeff Mahoneyda170662016-06-15 09:22:56 -04004734 start = round_down(start, btrfs_inode_sectorsize(inode));
4735 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
Josef Bacik4d479cf2011-11-17 11:34:31 -05004736
Chris Masonec29ed52011-02-23 16:23:20 -05004737 /*
4738 * lookup the last file extent. We're not using i_size here
4739 * because there might be preallocation past i_size
4740 */
David Sterbaf85b7372017-01-20 14:54:07 +01004741 ret = btrfs_lookup_file_extent(NULL, root, path,
4742 btrfs_ino(BTRFS_I(inode)), -1, 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00004743 if (ret < 0) {
David Sterba5911c8f2019-05-15 15:31:04 +02004744 goto out_free_ulist;
Liu Bo2d324f52016-05-17 17:21:48 -07004745 } else {
4746 WARN_ON(!ret);
4747 if (ret == 1)
4748 ret = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004749 }
Liu Bo2d324f52016-05-17 17:21:48 -07004750
Josef Bacik975f84f2010-11-23 19:36:57 +00004751 path->slots[0]--;
Josef Bacik975f84f2010-11-23 19:36:57 +00004752 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +02004753 found_type = found_key.type;
Josef Bacik975f84f2010-11-23 19:36:57 +00004754
Chris Masonec29ed52011-02-23 16:23:20 -05004755 /* No extents, but there might be delalloc bits */
Nikolay Borisov4a0cc7c2017-01-10 20:35:31 +02004756 if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00004757 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05004758 /* have to trust i_size as the end */
4759 last = (u64)-1;
4760 last_for_get_extent = isize;
4761 } else {
4762 /*
4763 * remember the start of the last extent. There are a
4764 * bunch of different factors that go into the length of the
 4765 * extent, so it's much less complex to remember where it started
4766 */
4767 last = found_key.offset;
4768 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00004769 }
Liu Bofe09e162013-09-22 12:54:23 +08004770 btrfs_release_path(path);
Josef Bacik975f84f2010-11-23 19:36:57 +00004771
Chris Masonec29ed52011-02-23 16:23:20 -05004772 /*
4773 * we might have some extents allocated but more delalloc past those
4774 * extents. so, we trust isize unless the start of the last extent is
4775 * beyond isize
4776 */
4777 if (last < isize) {
4778 last = (u64)-1;
4779 last_for_get_extent = isize;
4780 }
4781
David Sterbaff13db42015-12-03 14:30:40 +01004782 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01004783 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05004784
David Sterbae3350e12017-06-23 04:09:57 +02004785 em = get_extent_skip_holes(inode, start, last_for_get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004786 if (!em)
4787 goto out;
4788 if (IS_ERR(em)) {
4789 ret = PTR_ERR(em);
4790 goto out;
4791 }
Josef Bacik975f84f2010-11-23 19:36:57 +00004792
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004793 while (!end) {
Josef Bacikb76bb702013-07-05 13:52:51 -04004794 u64 offset_in_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004795
Chris Masonea8efc72011-03-08 11:54:40 -05004796 /* break if the extent we found is outside the range */
4797 if (em->start >= max || extent_map_end(em) < off)
4798 break;
4799
4800 /*
4801 * get_extent may return an extent that starts before our
4802 * requested range. We have to make sure the ranges
4803 * we return to fiemap always move forward and don't
4804 * overlap, so adjust the offsets here
4805 */
4806 em_start = max(em->start, off);
4807
4808 /*
4809 * record the offset from the start of the extent
Josef Bacikb76bb702013-07-05 13:52:51 -04004810 * for adjusting the disk offset below. Only do this if the
 4811 * extent isn't compressed since our in-RAM offset may be past
4812 * what we have actually allocated on disk.
Chris Masonea8efc72011-03-08 11:54:40 -05004813 */
Josef Bacikb76bb702013-07-05 13:52:51 -04004814 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4815 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05004816 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05004817 em_len = em_end - em_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004818 flags = 0;
Filipe Mananaf0986312018-06-20 10:02:30 +01004819 if (em->block_start < EXTENT_MAP_LAST_BYTE)
4820 disko = em->block_start + offset_in_extent;
4821 else
4822 disko = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004823
Chris Masonea8efc72011-03-08 11:54:40 -05004824 /*
4825 * bump off for our next call to get_extent
4826 */
4827 off = extent_map_end(em);
4828 if (off >= max)
4829 end = 1;
4830
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004831 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004832 end = 1;
4833 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004834 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004835 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4836 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004837 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004838 flags |= (FIEMAP_EXTENT_DELALLOC |
4839 FIEMAP_EXTENT_UNKNOWN);
Josef Bacikdc046b12014-09-10 16:20:45 -04004840 } else if (fieinfo->fi_extents_max) {
4841 u64 bytenr = em->block_start -
4842 (em->start - em->orig_start);
Liu Bofe09e162013-09-22 12:54:23 +08004843
Liu Bofe09e162013-09-22 12:54:23 +08004844 /*
4845 * As btrfs supports shared space, this information
4846 * can be exported to userspace tools via
Josef Bacikdc046b12014-09-10 16:20:45 -04004847 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4848 * then we're just getting a count and we can skip the
4849 * lookup stuff.
Liu Bofe09e162013-09-22 12:54:23 +08004850 */
Edmund Nadolskibb739cf2017-06-28 21:56:58 -06004851 ret = btrfs_check_shared(root,
4852 btrfs_ino(BTRFS_I(inode)),
David Sterba5911c8f2019-05-15 15:31:04 +02004853 bytenr, roots, tmp_ulist);
Josef Bacikdc046b12014-09-10 16:20:45 -04004854 if (ret < 0)
Liu Bofe09e162013-09-22 12:54:23 +08004855 goto out_free;
Josef Bacikdc046b12014-09-10 16:20:45 -04004856 if (ret)
Liu Bofe09e162013-09-22 12:54:23 +08004857 flags |= FIEMAP_EXTENT_SHARED;
Josef Bacikdc046b12014-09-10 16:20:45 -04004858 ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004859 }
4860 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4861 flags |= FIEMAP_EXTENT_ENCODED;
Josef Bacik0d2b2372015-05-19 10:44:04 -04004862 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4863 flags |= FIEMAP_EXTENT_UNWRITTEN;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004864
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004865 free_extent_map(em);
4866 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05004867 if ((em_start >= last) || em_len == (u64)-1 ||
4868 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004869 flags |= FIEMAP_EXTENT_LAST;
4870 end = 1;
4871 }
4872
Chris Masonec29ed52011-02-23 16:23:20 -05004873 /* now scan forward to see if this is really the last extent. */
David Sterbae3350e12017-06-23 04:09:57 +02004874 em = get_extent_skip_holes(inode, off, last_for_get_extent);
Chris Masonec29ed52011-02-23 16:23:20 -05004875 if (IS_ERR(em)) {
4876 ret = PTR_ERR(em);
4877 goto out;
4878 }
4879 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00004880 flags |= FIEMAP_EXTENT_LAST;
4881 end = 1;
4882 }
Qu Wenruo47518322017-04-07 10:43:15 +08004883 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4884 em_len, flags);
Chengyu Song26e726a2015-03-24 18:12:56 -04004885 if (ret) {
4886 if (ret == 1)
4887 ret = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004888 goto out_free;
Chengyu Song26e726a2015-03-24 18:12:56 -04004889 }
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004890 }
4891out_free:
Qu Wenruo47518322017-04-07 10:43:15 +08004892 if (!ret)
David Sterba5c5aff92019-03-20 11:29:46 +01004893 ret = emit_last_fiemap_cache(fieinfo, &cache);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004894 free_extent_map(em);
4895out:
Liu Boa52f4cd2013-05-01 16:23:41 +00004896 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
David Sterbae43bbe52017-12-12 21:43:52 +01004897 &cached_state);
David Sterba5911c8f2019-05-15 15:31:04 +02004898
4899out_free_ulist:
Colin Ian Kinge02d48e2019-07-05 08:26:24 +01004900 btrfs_free_path(path);
David Sterba5911c8f2019-05-15 15:31:04 +02004901 ulist_free(roots);
4902 ulist_free(tmp_ulist);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004903 return ret;
4904}
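/*
 * Editorial sketch (not part of the original file): extent_fiemap() backs
 * the ->fiemap inode operation.  A wrapper along these lines (btrfs keeps
 * one in inode.c) validates the caller's flags first and then hands the
 * range off; BTRFS_FIEMAP_FLAGS stands in for the real flag mask, which is
 * not reproduced here.
 */
#if 0
static int btrfs_fiemap_sketch(struct inode *inode,
			       struct fiemap_extent_info *fieinfo,
			       __u64 start, __u64 len)
{
	int ret;

	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
	if (ret)
		return ret;

	return extent_fiemap(inode, fieinfo, start, len);
}
#endif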
4905
Chris Mason727011e2010-08-06 13:21:20 -04004906static void __free_extent_buffer(struct extent_buffer *eb)
4907{
Chris Mason727011e2010-08-06 13:21:20 -04004908 kmem_cache_free(extent_buffer_cache, eb);
4909}
4910
Josef Bacika26e8c92014-03-28 17:07:27 -04004911int extent_buffer_under_io(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004912{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004913 return (atomic_read(&eb->io_pages) ||
4914 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4915 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05004916}
4917
Miao Xie897ca6e92010-10-26 20:57:29 -04004918/*
David Sterba55ac0132018-07-19 17:24:32 +02004919 * Release all pages attached to the extent buffer.
Miao Xie897ca6e92010-10-26 20:57:29 -04004920 */
David Sterba55ac0132018-07-19 17:24:32 +02004921static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
Miao Xie897ca6e92010-10-26 20:57:29 -04004922{
Nikolay Borisovd64766f2018-06-27 16:38:22 +03004923 int i;
4924 int num_pages;
Nikolay Borisovb0132a32018-06-27 16:38:24 +03004925 int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
Miao Xie897ca6e92010-10-26 20:57:29 -04004926
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004927 BUG_ON(extent_buffer_under_io(eb));
Miao Xie897ca6e92010-10-26 20:57:29 -04004928
Nikolay Borisovd64766f2018-06-27 16:38:22 +03004929 num_pages = num_extent_pages(eb);
4930 for (i = 0; i < num_pages; i++) {
4931 struct page *page = eb->pages[i];
Miao Xie897ca6e92010-10-26 20:57:29 -04004932
Forrest Liu5d2361d2015-02-09 17:31:45 +08004933 if (!page)
4934 continue;
4935 if (mapped)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004936 spin_lock(&page->mapping->private_lock);
Forrest Liu5d2361d2015-02-09 17:31:45 +08004937 /*
4938 * We do this since we'll remove the pages after we've
4939 * removed the eb from the radix tree, so we could race
4940 * and have this page now attached to the new eb. So
4941 * only clear page_private if it's still connected to
4942 * this eb.
4943 */
4944 if (PagePrivate(page) &&
4945 page->private == (unsigned long)eb) {
4946 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4947 BUG_ON(PageDirty(page));
4948 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004949 /*
Forrest Liu5d2361d2015-02-09 17:31:45 +08004950 * We need to make sure we haven't been attached
4951 * to a new eb.
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004952 */
Forrest Liu5d2361d2015-02-09 17:31:45 +08004953 ClearPagePrivate(page);
4954 set_page_private(page, 0);
4955 /* One for the page private */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004956 put_page(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004957 }
Forrest Liu5d2361d2015-02-09 17:31:45 +08004958
4959 if (mapped)
4960 spin_unlock(&page->mapping->private_lock);
4961
Nicholas D Steeves01327612016-05-19 21:18:45 -04004962 /* One for when we allocated the page */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004963 put_page(page);
Nikolay Borisovd64766f2018-06-27 16:38:22 +03004964 }
Miao Xie897ca6e92010-10-26 20:57:29 -04004965}
4966
4967/*
4968 * Helper for releasing the extent buffer.
4969 */
4970static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4971{
David Sterba55ac0132018-07-19 17:24:32 +02004972 btrfs_release_extent_buffer_pages(eb);
Josef Bacik8c389382020-02-14 16:11:42 -05004973 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
Miao Xie897ca6e92010-10-26 20:57:29 -04004974 __free_extent_buffer(eb);
4975}
4976
Josef Bacikf28491e2013-12-16 13:24:27 -05004977static struct extent_buffer *
4978__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
David Sterba23d79d82014-06-15 02:55:29 +02004979 unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004980{
4981 struct extent_buffer *eb = NULL;
4982
Michal Hockod1b5c562015-08-19 14:17:40 +02004983 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004984 eb->start = start;
4985 eb->len = len;
Josef Bacikf28491e2013-12-16 13:24:27 -05004986 eb->fs_info = fs_info;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004987 eb->bflags = 0;
4988 rwlock_init(&eb->lock);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004989 atomic_set(&eb->blocking_readers, 0);
David Sterba06297d82019-05-02 16:47:23 +02004990 eb->blocking_writers = 0;
David Sterbaed1b4ed2018-08-24 16:31:17 +02004991 eb->lock_nested = false;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004992 init_waitqueue_head(&eb->write_lock_wq);
4993 init_waitqueue_head(&eb->read_lock_wq);
4994
Josef Bacik3fd63722020-02-14 16:11:40 -05004995 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
4996 &fs_info->allocated_ebs);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004997
4998 spin_lock_init(&eb->refs_lock);
4999 atomic_set(&eb->refs, 1);
5000 atomic_set(&eb->io_pages, 0);
5001
5002 /*
5003 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
5004 */
5005 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
5006 > MAX_INLINE_EXTENT_BUFFER_SIZE);
5007 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
5008
David Sterba843ccf92018-08-24 14:56:28 +02005009#ifdef CONFIG_BTRFS_DEBUG
David Sterbaf3dc24c2019-05-02 16:51:53 +02005010 eb->spinning_writers = 0;
David Sterbaafd495a2018-08-24 15:57:38 +02005011 atomic_set(&eb->spinning_readers, 0);
David Sterba5c9c7992018-08-24 16:15:51 +02005012 atomic_set(&eb->read_locks, 0);
David Sterba00801ae2019-05-02 16:53:47 +02005013 eb->write_locks = 0;
David Sterba843ccf92018-08-24 14:56:28 +02005014#endif
5015
Josef Bacikdb7f3432013-08-07 14:54:37 -04005016 return eb;
5017}
5018
5019struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
5020{
David Sterbacc5e31a2018-03-01 18:20:27 +01005021 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005022 struct page *p;
5023 struct extent_buffer *new;
David Sterbacc5e31a2018-03-01 18:20:27 +01005024 int num_pages = num_extent_pages(src);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005025
David Sterba3f556f72014-06-15 03:20:26 +02005026 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005027 if (new == NULL)
5028 return NULL;
5029
5030 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04005031 p = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005032 if (!p) {
5033 btrfs_release_extent_buffer(new);
5034 return NULL;
5035 }
5036 attach_extent_buffer_page(new, p);
5037 WARN_ON(PageDirty(p));
5038 SetPageUptodate(p);
5039 new->pages[i] = p;
David Sterbafba1acf2016-11-08 17:56:24 +01005040 copy_page(page_address(p), page_address(src->pages[i]));
Josef Bacikdb7f3432013-08-07 14:54:37 -04005041 }
5042
Josef Bacikdb7f3432013-08-07 14:54:37 -04005043 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
Nikolay Borisovb0132a32018-06-27 16:38:24 +03005044 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005045
5046 return new;
5047}
5048
Omar Sandoval0f331222015-09-29 20:50:31 -07005049struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5050 u64 start, unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04005051{
5052 struct extent_buffer *eb;
David Sterbacc5e31a2018-03-01 18:20:27 +01005053 int num_pages;
5054 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005055
David Sterba3f556f72014-06-15 03:20:26 +02005056 eb = __alloc_extent_buffer(fs_info, start, len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005057 if (!eb)
5058 return NULL;
5059
David Sterba65ad0102018-06-29 10:56:49 +02005060 num_pages = num_extent_pages(eb);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005061 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04005062 eb->pages[i] = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005063 if (!eb->pages[i])
5064 goto err;
5065 }
5066 set_extent_buffer_uptodate(eb);
5067 btrfs_set_header_nritems(eb, 0);
Nikolay Borisovb0132a32018-06-27 16:38:24 +03005068 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005069
5070 return eb;
5071err:
5072 for (; i > 0; i--)
5073 __free_page(eb->pages[i - 1]);
5074 __free_extent_buffer(eb);
5075 return NULL;
5076}
5077
Omar Sandoval0f331222015-09-29 20:50:31 -07005078struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04005079 u64 start)
Omar Sandoval0f331222015-09-29 20:50:31 -07005080{
Jeff Mahoneyda170662016-06-15 09:22:56 -04005081 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
Omar Sandoval0f331222015-09-29 20:50:31 -07005082}
5083
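/*
 * Editorial note (not part of the original file): dummy extent buffers are
 * marked EXTENT_BUFFER_UNMAPPED, never inserted into the buffer radix tree
 * and never read from disk, which is what makes them useful for the sanity
 * self-tests.  A hypothetical test could do roughly:
 */
#if 0
	struct extent_buffer *eb;

	eb = alloc_dummy_extent_buffer(fs_info, fs_info->nodesize);
	if (!eb)
		return -ENOMEM;
	/* ... exercise the read/write helpers below on eb ... */
	free_extent_buffer(eb);
#endif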
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005084static void check_buffer_tree_ref(struct extent_buffer *eb)
5085{
Chris Mason242e18c2013-01-29 17:49:37 -05005086 int refs;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005087 /* the ref bit is tricky. We have to make sure it is set
5088 * if we have the buffer dirty. Otherwise the
5089 * code to free a buffer can end up dropping a dirty
5090 * page
5091 *
5092 * Once the ref bit is set, it won't go away while the
5093 * buffer is dirty or in writeback, and it also won't
5094 * go away while we have the reference count on the
5095 * eb bumped.
5096 *
5097 * We can't just set the ref bit without bumping the
5098 * ref on the eb because free_extent_buffer might
5099 * see the ref bit and try to clear it. If this happens
5100 * free_extent_buffer might end up dropping our original
5101 * ref by mistake and freeing the page before we are able
5102 * to add one more ref.
5103 *
5104 * So bump the ref count first, then set the bit. If someone
5105 * beat us to it, drop the ref we added.
5106 */
Chris Mason242e18c2013-01-29 17:49:37 -05005107 refs = atomic_read(&eb->refs);
5108 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5109 return;
5110
Josef Bacik594831c2012-07-20 16:11:08 -04005111 spin_lock(&eb->refs_lock);
5112 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005113 atomic_inc(&eb->refs);
Josef Bacik594831c2012-07-20 16:11:08 -04005114 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005115}
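/*
 * Editorial illustration (not part of the original file) of the ordering
 * the comment above insists on.  If the bit were set before taking the
 * extra reference, this interleaving could free the buffer under us:
 *
 *   check_buffer_tree_ref()              free_extent_buffer()
 *   set_bit(EXTENT_BUFFER_TREE_REF)
 *                                        sees TREE_REF, clears it and
 *                                        drops what it believes is the
 *                                        tree's reference
 *   atomic_inc(&eb->refs)                refcount already reached zero,
 *                                        buffer freed -> use after free
 *
 * Taking the reference first and only then setting the bit (under
 * refs_lock) closes that window.
 */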
5116
Mel Gorman2457aec2014-06-04 16:10:31 -07005117static void mark_extent_buffer_accessed(struct extent_buffer *eb,
5118 struct page *accessed)
Josef Bacik5df42352012-03-15 18:24:42 -04005119{
David Sterbacc5e31a2018-03-01 18:20:27 +01005120 int num_pages, i;
Josef Bacik5df42352012-03-15 18:24:42 -04005121
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005122 check_buffer_tree_ref(eb);
5123
David Sterba65ad0102018-06-29 10:56:49 +02005124 num_pages = num_extent_pages(eb);
Josef Bacik5df42352012-03-15 18:24:42 -04005125 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005126 struct page *p = eb->pages[i];
5127
Mel Gorman2457aec2014-06-04 16:10:31 -07005128 if (p != accessed)
5129 mark_page_accessed(p);
Josef Bacik5df42352012-03-15 18:24:42 -04005130 }
5131}
5132
Josef Bacikf28491e2013-12-16 13:24:27 -05005133struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5134 u64 start)
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005135{
5136 struct extent_buffer *eb;
5137
5138 rcu_read_lock();
Josef Bacikf28491e2013-12-16 13:24:27 -05005139 eb = radix_tree_lookup(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005140 start >> PAGE_SHIFT);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005141 if (eb && atomic_inc_not_zero(&eb->refs)) {
5142 rcu_read_unlock();
Filipe Manana062c19e2015-04-23 11:28:48 +01005143 /*
5144 * Lock our eb's refs_lock to avoid races with
5145 * free_extent_buffer. When we get our eb it might be flagged
5146 * with EXTENT_BUFFER_STALE and another task running
5147 * free_extent_buffer might have seen that flag set,
5148 * eb->refs == 2, that the buffer isn't under IO (dirty and
5149 * writeback flags not set) and it's still in the tree (flag
5150 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
5151 * of decrementing the extent buffer's reference count twice.
5152 * So here we could race and increment the eb's reference count,
5153 * clear its stale flag, mark it as dirty and drop our reference
5154 * before the other task finishes executing free_extent_buffer,
5155 * which would later result in an attempt to free an extent
5156 * buffer that is dirty.
5157 */
5158 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5159 spin_lock(&eb->refs_lock);
5160 spin_unlock(&eb->refs_lock);
5161 }
Mel Gorman2457aec2014-06-04 16:10:31 -07005162 mark_extent_buffer_accessed(eb, NULL);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005163 return eb;
5164 }
5165 rcu_read_unlock();
5166
5167 return NULL;
5168}
5169
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005170#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5171struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04005172 u64 start)
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005173{
5174 struct extent_buffer *eb, *exists = NULL;
5175 int ret;
5176
5177 eb = find_extent_buffer(fs_info, start);
5178 if (eb)
5179 return eb;
Jeff Mahoneyda170662016-06-15 09:22:56 -04005180 eb = alloc_dummy_extent_buffer(fs_info, start);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005181 if (!eb)
Dan Carpenterb6293c82019-12-03 14:24:58 +03005182 return ERR_PTR(-ENOMEM);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005183 eb->fs_info = fs_info;
5184again:
David Sterbae1860a72016-05-09 14:11:38 +02005185 ret = radix_tree_preload(GFP_NOFS);
Dan Carpenterb6293c82019-12-03 14:24:58 +03005186 if (ret) {
5187 exists = ERR_PTR(ret);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005188 goto free_eb;
Dan Carpenterb6293c82019-12-03 14:24:58 +03005189 }
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005190 spin_lock(&fs_info->buffer_lock);
5191 ret = radix_tree_insert(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005192 start >> PAGE_SHIFT, eb);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005193 spin_unlock(&fs_info->buffer_lock);
5194 radix_tree_preload_end();
5195 if (ret == -EEXIST) {
5196 exists = find_extent_buffer(fs_info, start);
5197 if (exists)
5198 goto free_eb;
5199 else
5200 goto again;
5201 }
5202 check_buffer_tree_ref(eb);
5203 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5204
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005205 return eb;
5206free_eb:
5207 btrfs_release_extent_buffer(eb);
5208 return exists;
5209}
5210#endif
5211
Josef Bacikf28491e2013-12-16 13:24:27 -05005212struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
David Sterbace3e6982014-06-15 03:00:04 +02005213 u64 start)
Chris Masond1310b22008-01-24 16:13:08 -05005214{
Jeff Mahoneyda170662016-06-15 09:22:56 -04005215 unsigned long len = fs_info->nodesize;
David Sterbacc5e31a2018-03-01 18:20:27 +01005216 int num_pages;
5217 int i;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005218 unsigned long index = start >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005219 struct extent_buffer *eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005220 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05005221 struct page *p;
Josef Bacikf28491e2013-12-16 13:24:27 -05005222 struct address_space *mapping = fs_info->btree_inode->i_mapping;
Chris Masond1310b22008-01-24 16:13:08 -05005223 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04005224 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05005225
Jeff Mahoneyda170662016-06-15 09:22:56 -04005226 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
Liu Boc871b0f2016-06-06 12:01:23 -07005227 btrfs_err(fs_info, "bad tree block start %llu", start);
5228 return ERR_PTR(-EINVAL);
5229 }
5230
Josef Bacikf28491e2013-12-16 13:24:27 -05005231 eb = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005232 if (eb)
Chris Mason6af118ce2008-07-22 11:18:07 -04005233 return eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005234
David Sterba23d79d82014-06-15 02:55:29 +02005235 eb = __alloc_extent_buffer(fs_info, start, len);
Peter2b114d12008-04-01 11:21:40 -04005236 if (!eb)
Liu Boc871b0f2016-06-06 12:01:23 -07005237 return ERR_PTR(-ENOMEM);
Chris Masond1310b22008-01-24 16:13:08 -05005238
David Sterba65ad0102018-06-29 10:56:49 +02005239 num_pages = num_extent_pages(eb);
Chris Mason727011e2010-08-06 13:21:20 -04005240 for (i = 0; i < num_pages; i++, index++) {
Michal Hockod1b5c562015-08-19 14:17:40 +02005241 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
Liu Boc871b0f2016-06-06 12:01:23 -07005242 if (!p) {
5243 exists = ERR_PTR(-ENOMEM);
Chris Mason6af118ce2008-07-22 11:18:07 -04005244 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07005245 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005246
5247 spin_lock(&mapping->private_lock);
5248 if (PagePrivate(p)) {
5249 /*
5250 * We could have already allocated an eb for this page
 5251 * and attached one so let's see if we can get a ref on
5252 * the existing eb, and if we can we know it's good and
5253 * we can just return that one, else we know we can just
5254 * overwrite page->private.
5255 */
5256 exists = (struct extent_buffer *)p->private;
5257 if (atomic_inc_not_zero(&exists->refs)) {
5258 spin_unlock(&mapping->private_lock);
5259 unlock_page(p);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005260 put_page(p);
Mel Gorman2457aec2014-06-04 16:10:31 -07005261 mark_extent_buffer_accessed(exists, p);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005262 goto free_eb;
5263 }
Omar Sandoval5ca64f42015-02-24 02:47:05 -08005264 exists = NULL;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005265
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005266 /*
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005267 * Do this so attach doesn't complain and we need to
5268 * drop the ref the old guy had.
5269 */
5270 ClearPagePrivate(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005271 WARN_ON(PageDirty(p));
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005272 put_page(p);
Chris Masond1310b22008-01-24 16:13:08 -05005273 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005274 attach_extent_buffer_page(eb, p);
5275 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005276 WARN_ON(PageDirty(p));
Chris Mason727011e2010-08-06 13:21:20 -04005277 eb->pages[i] = p;
Chris Masond1310b22008-01-24 16:13:08 -05005278 if (!PageUptodate(p))
5279 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05005280
5281 /*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03005282 * We can't unlock the pages just yet since the extent buffer
5283 * hasn't been properly inserted in the radix tree, this
5284 * opens a race with btree_releasepage which can free a page
5285 * while we are still filling in all pages for the buffer and
5286 * we could crash.
Chris Masoneb14ab82011-02-10 12:35:00 -05005287 */
Chris Masond1310b22008-01-24 16:13:08 -05005288 }
5289 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05005290 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik115391d2012-03-09 09:51:43 -05005291again:
David Sterbae1860a72016-05-09 14:11:38 +02005292 ret = radix_tree_preload(GFP_NOFS);
Liu Boc871b0f2016-06-06 12:01:23 -07005293 if (ret) {
5294 exists = ERR_PTR(ret);
Miao Xie19fe0a82010-10-26 20:57:29 -04005295 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07005296 }
Miao Xie19fe0a82010-10-26 20:57:29 -04005297
Josef Bacikf28491e2013-12-16 13:24:27 -05005298 spin_lock(&fs_info->buffer_lock);
5299 ret = radix_tree_insert(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005300 start >> PAGE_SHIFT, eb);
Josef Bacikf28491e2013-12-16 13:24:27 -05005301 spin_unlock(&fs_info->buffer_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005302 radix_tree_preload_end();
Miao Xie19fe0a82010-10-26 20:57:29 -04005303 if (ret == -EEXIST) {
Josef Bacikf28491e2013-12-16 13:24:27 -05005304 exists = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005305 if (exists)
5306 goto free_eb;
5307 else
Josef Bacik115391d2012-03-09 09:51:43 -05005308 goto again;
Chris Mason6af118ce2008-07-22 11:18:07 -04005309 }
Chris Mason6af118ce2008-07-22 11:18:07 -04005310 /* add one reference for the tree */
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005311 check_buffer_tree_ref(eb);
Josef Bacik34b41ac2013-12-13 10:41:51 -05005312 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
Chris Masoneb14ab82011-02-10 12:35:00 -05005313
5314 /*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03005315 * Now it's safe to unlock the pages because any calls to
5316 * btree_releasepage will correctly detect that a page belongs to a
5317 * live buffer and won't free them prematurely.
Chris Masoneb14ab82011-02-10 12:35:00 -05005318 */
Nikolay Borisov28187ae2018-07-04 10:24:51 +03005319 for (i = 0; i < num_pages; i++)
5320 unlock_page(eb->pages[i]);
Chris Masond1310b22008-01-24 16:13:08 -05005321 return eb;
5322
Chris Mason6af118ce2008-07-22 11:18:07 -04005323free_eb:
Omar Sandoval5ca64f42015-02-24 02:47:05 -08005324 WARN_ON(!atomic_dec_and_test(&eb->refs));
Chris Mason727011e2010-08-06 13:21:20 -04005325 for (i = 0; i < num_pages; i++) {
5326 if (eb->pages[i])
5327 unlock_page(eb->pages[i]);
5328 }
Chris Masoneb14ab82011-02-10 12:35:00 -05005329
Miao Xie897ca6e92010-10-26 20:57:29 -04005330 btrfs_release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04005331 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05005332}
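/*
 * Editorial sketch (not part of the original file): the usual metadata
 * read path pairs alloc_extent_buffer() with read_extent_buffer_pages()
 * and drops the reference with free_extent_buffer() when done.  This is a
 * simplified caller; the real helpers in disk-io.c additionally verify the
 * block contents.
 */
#if 0
static struct extent_buffer *read_tree_block_sketch(struct btrfs_fs_info *fs_info,
						    u64 bytenr)
{
	struct extent_buffer *eb;
	int ret;

	eb = alloc_extent_buffer(fs_info, bytenr);
	if (IS_ERR(eb))
		return eb;

	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0);
	if (ret) {
		free_extent_buffer(eb);
		return ERR_PTR(ret);
	}
	return eb;
}
#endif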
Chris Masond1310b22008-01-24 16:13:08 -05005333
Josef Bacik3083ee22012-03-09 16:01:49 -05005334static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5335{
5336 struct extent_buffer *eb =
5337 container_of(head, struct extent_buffer, rcu_head);
5338
5339 __free_extent_buffer(eb);
5340}
5341
David Sterbaf7a52a42013-04-26 14:56:29 +00005342static int release_extent_buffer(struct extent_buffer *eb)
Jules Irenge5ce48d02020-02-23 23:16:42 +00005343 __releases(&eb->refs_lock)
Josef Bacik3083ee22012-03-09 16:01:49 -05005344{
Nikolay Borisov07e21c42018-06-27 16:38:23 +03005345 lockdep_assert_held(&eb->refs_lock);
5346
Josef Bacik3083ee22012-03-09 16:01:49 -05005347 WARN_ON(atomic_read(&eb->refs) == 0);
5348 if (atomic_dec_and_test(&eb->refs)) {
Josef Bacik34b41ac2013-12-13 10:41:51 -05005349 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
Josef Bacikf28491e2013-12-16 13:24:27 -05005350 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacik3083ee22012-03-09 16:01:49 -05005351
Jan Schmidt815a51c2012-05-16 17:00:02 +02005352 spin_unlock(&eb->refs_lock);
Josef Bacik3083ee22012-03-09 16:01:49 -05005353
Josef Bacikf28491e2013-12-16 13:24:27 -05005354 spin_lock(&fs_info->buffer_lock);
5355 radix_tree_delete(&fs_info->buffer_radix,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005356 eb->start >> PAGE_SHIFT);
Josef Bacikf28491e2013-12-16 13:24:27 -05005357 spin_unlock(&fs_info->buffer_lock);
Josef Bacik34b41ac2013-12-13 10:41:51 -05005358 } else {
5359 spin_unlock(&eb->refs_lock);
Jan Schmidt815a51c2012-05-16 17:00:02 +02005360 }
Josef Bacik3083ee22012-03-09 16:01:49 -05005361
Josef Bacik8c389382020-02-14 16:11:42 -05005362 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
Josef Bacik3083ee22012-03-09 16:01:49 -05005363 /* Should be safe to release our pages at this point */
David Sterba55ac0132018-07-19 17:24:32 +02005364 btrfs_release_extent_buffer_pages(eb);
Josef Bacikbcb7e442015-03-16 17:38:02 -04005365#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
Nikolay Borisovb0132a32018-06-27 16:38:24 +03005366 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
Josef Bacikbcb7e442015-03-16 17:38:02 -04005367 __free_extent_buffer(eb);
5368 return 1;
5369 }
5370#endif
Josef Bacik3083ee22012-03-09 16:01:49 -05005371 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
Josef Bacike64860a2012-07-20 16:05:36 -04005372 return 1;
Josef Bacik3083ee22012-03-09 16:01:49 -05005373 }
5374 spin_unlock(&eb->refs_lock);
Josef Bacike64860a2012-07-20 16:05:36 -04005375
5376 return 0;
Josef Bacik3083ee22012-03-09 16:01:49 -05005377}
5378
Chris Masond1310b22008-01-24 16:13:08 -05005379void free_extent_buffer(struct extent_buffer *eb)
5380{
Chris Mason242e18c2013-01-29 17:49:37 -05005381 int refs;
5382 int old;
Chris Masond1310b22008-01-24 16:13:08 -05005383 if (!eb)
5384 return;
5385
Chris Mason242e18c2013-01-29 17:49:37 -05005386 while (1) {
5387 refs = atomic_read(&eb->refs);
Nikolay Borisov46cc7752018-10-15 17:04:01 +03005388 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5389 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5390 refs == 1))
Chris Mason242e18c2013-01-29 17:49:37 -05005391 break;
5392 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5393 if (old == refs)
5394 return;
5395 }
5396
Josef Bacik3083ee22012-03-09 16:01:49 -05005397 spin_lock(&eb->refs_lock);
5398 if (atomic_read(&eb->refs) == 2 &&
5399 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005400 !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05005401 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5402 atomic_dec(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05005403
Josef Bacik3083ee22012-03-09 16:01:49 -05005404 /*
5405 * I know this is terrible, but it's temporary until we stop tracking
5406 * the uptodate bits and such for the extent buffers.
5407 */
David Sterbaf7a52a42013-04-26 14:56:29 +00005408 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005409}
Chris Masond1310b22008-01-24 16:13:08 -05005410
Josef Bacik3083ee22012-03-09 16:01:49 -05005411void free_extent_buffer_stale(struct extent_buffer *eb)
5412{
5413 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05005414 return;
5415
Josef Bacik3083ee22012-03-09 16:01:49 -05005416 spin_lock(&eb->refs_lock);
5417 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5418
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005419 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05005420 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5421 atomic_dec(&eb->refs);
David Sterbaf7a52a42013-04-26 14:56:29 +00005422 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005423}
5424
Chris Mason1d4284b2012-03-28 20:31:37 -04005425void clear_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005426{
David Sterbacc5e31a2018-03-01 18:20:27 +01005427 int i;
5428 int num_pages;
Chris Masond1310b22008-01-24 16:13:08 -05005429 struct page *page;
5430
David Sterba65ad0102018-06-29 10:56:49 +02005431 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005432
5433 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005434 page = eb->pages[i];
Chris Masonb9473432009-03-13 11:00:37 -04005435 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05005436 continue;
5437
Chris Masona61e6f22008-07-22 11:18:08 -04005438 lock_page(page);
Chris Masoneb14ab82011-02-10 12:35:00 -05005439 WARN_ON(!PagePrivate(page));
5440
Chris Masond1310b22008-01-24 16:13:08 -05005441 clear_page_dirty_for_io(page);
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07005442 xa_lock_irq(&page->mapping->i_pages);
Matthew Wilcox0a943c62017-12-04 10:37:22 -05005443 if (!PageDirty(page))
5444 __xa_clear_mark(&page->mapping->i_pages,
5445 page_index(page), PAGECACHE_TAG_DIRTY);
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07005446 xa_unlock_irq(&page->mapping->i_pages);
Chris Masonbf0da8c2011-11-04 12:29:37 -04005447 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04005448 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05005449 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005450 WARN_ON(atomic_read(&eb->refs) == 0);
Chris Masond1310b22008-01-24 16:13:08 -05005451}
Chris Masond1310b22008-01-24 16:13:08 -05005452
Liu Boabb57ef2018-09-14 01:44:42 +08005453bool set_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005454{
David Sterbacc5e31a2018-03-01 18:20:27 +01005455 int i;
5456 int num_pages;
Liu Boabb57ef2018-09-14 01:44:42 +08005457 bool was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05005458
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005459 check_buffer_tree_ref(eb);
5460
Chris Masonb9473432009-03-13 11:00:37 -04005461 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005462
David Sterba65ad0102018-06-29 10:56:49 +02005463 num_pages = num_extent_pages(eb);
Josef Bacik3083ee22012-03-09 16:01:49 -05005464 WARN_ON(atomic_read(&eb->refs) == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005465 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5466
Liu Boabb57ef2018-09-14 01:44:42 +08005467 if (!was_dirty)
5468 for (i = 0; i < num_pages; i++)
5469 set_page_dirty(eb->pages[i]);
Liu Bo51995c32018-09-14 01:46:08 +08005470
5471#ifdef CONFIG_BTRFS_DEBUG
5472 for (i = 0; i < num_pages; i++)
5473 ASSERT(PageDirty(eb->pages[i]));
5474#endif
5475
Chris Masonb9473432009-03-13 11:00:37 -04005476 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05005477}
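/*
 * Editorial note (not part of the original file): set_extent_buffer_dirty()
 * and clear_extent_buffer_dirty() above bracket the life of a dirty
 * metadata block.  A caller that has modified a buffer typically does,
 * roughly:
 */
#if 0
	btrfs_tree_lock(eb);
	/* ... modify the buffer with write_extent_buffer() and friends ... */
	set_extent_buffer_dirty(eb);	/* returns true if it was already dirty */
	btrfs_tree_unlock(eb);
#endif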
Chris Masond1310b22008-01-24 16:13:08 -05005478
David Sterba69ba3922015-12-03 13:08:59 +01005479void clear_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Mason1259ab72008-05-12 13:39:03 -04005480{
David Sterbacc5e31a2018-03-01 18:20:27 +01005481 int i;
Chris Mason1259ab72008-05-12 13:39:03 -04005482 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01005483 int num_pages;
Chris Mason1259ab72008-05-12 13:39:03 -04005484
Chris Masonb4ce94d2009-02-04 09:25:08 -05005485 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02005486 num_pages = num_extent_pages(eb);
Chris Mason1259ab72008-05-12 13:39:03 -04005487 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005488 page = eb->pages[i];
Chris Mason33958dc2008-07-30 10:29:12 -04005489 if (page)
5490 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04005491 }
Chris Mason1259ab72008-05-12 13:39:03 -04005492}
5493
David Sterba09c25a82015-12-03 13:08:59 +01005494void set_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005495{
David Sterbacc5e31a2018-03-01 18:20:27 +01005496 int i;
Chris Masond1310b22008-01-24 16:13:08 -05005497 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01005498 int num_pages;
Chris Masond1310b22008-01-24 16:13:08 -05005499
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005500 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02005501 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005502 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005503 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005504 SetPageUptodate(page);
5505 }
Chris Masond1310b22008-01-24 16:13:08 -05005506}
Chris Masond1310b22008-01-24 16:13:08 -05005507
Nikolay Borisovc2ccfbc2019-04-10 17:24:40 +03005508int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05005509{
David Sterbacc5e31a2018-03-01 18:20:27 +01005510 int i;
Chris Masond1310b22008-01-24 16:13:08 -05005511 struct page *page;
5512 int err;
5513 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04005514 int locked_pages = 0;
5515 int all_uptodate = 1;
David Sterbacc5e31a2018-03-01 18:20:27 +01005516 int num_pages;
Chris Mason727011e2010-08-06 13:21:20 -04005517 unsigned long num_reads = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05005518 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04005519 unsigned long bio_flags = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05005520
Chris Masonb4ce94d2009-02-04 09:25:08 -05005521 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05005522 return 0;
5523
David Sterba65ad0102018-06-29 10:56:49 +02005524 num_pages = num_extent_pages(eb);
Josef Bacik8436ea912016-09-02 15:40:03 -04005525 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005526 page = eb->pages[i];
Arne Jansenbb82ab82011-06-10 14:06:53 +02005527 if (wait == WAIT_NONE) {
David Woodhouse2db04962008-08-07 11:19:43 -04005528 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04005529 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05005530 } else {
5531 lock_page(page);
5532 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005533 locked_pages++;
Liu Bo2571e732016-08-03 12:33:01 -07005534 }
5535 /*
 5536 * We need to first lock all pages to make sure that
5537 * the uptodate bit of our pages won't be affected by
5538 * clear_extent_buffer_uptodate().
5539 */
Josef Bacik8436ea912016-09-02 15:40:03 -04005540 for (i = 0; i < num_pages; i++) {
Liu Bo2571e732016-08-03 12:33:01 -07005541 page = eb->pages[i];
Chris Mason727011e2010-08-06 13:21:20 -04005542 if (!PageUptodate(page)) {
5543 num_reads++;
Chris Masonce9adaa2008-04-09 16:28:12 -04005544 all_uptodate = 0;
Chris Mason727011e2010-08-06 13:21:20 -04005545 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005546 }
Liu Bo2571e732016-08-03 12:33:01 -07005547
Chris Masonce9adaa2008-04-09 16:28:12 -04005548 if (all_uptodate) {
Josef Bacik8436ea912016-09-02 15:40:03 -04005549 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04005550 goto unlock_exit;
5551 }
5552
Filipe Manana656f30d2014-09-26 12:25:56 +01005553 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
Josef Bacik5cf1ab52012-04-16 09:42:26 -04005554 eb->read_mirror = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005555 atomic_set(&eb->io_pages, num_reads);
Josef Bacik8436ea912016-09-02 15:40:03 -04005556 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005557 page = eb->pages[i];
Liu Bobaf863b2016-07-11 10:39:07 -07005558
Chris Masonce9adaa2008-04-09 16:28:12 -04005559 if (!PageUptodate(page)) {
Liu Bobaf863b2016-07-11 10:39:07 -07005560 if (ret) {
5561 atomic_dec(&eb->io_pages);
5562 unlock_page(page);
5563 continue;
5564 }
5565
Chris Masonf1885912008-04-09 16:28:12 -04005566 ClearPageError(page);
David Sterba0d44fea2020-02-05 19:09:37 +01005567 err = __extent_read_full_page(page,
David Sterba6af49db2017-06-23 04:09:57 +02005568 btree_get_extent, &bio,
Josef Bacikd4c7ca82013-04-19 19:49:09 -04005569 mirror_num, &bio_flags,
Mike Christie1f7ad752016-06-05 14:31:51 -05005570 REQ_META);
Liu Bobaf863b2016-07-11 10:39:07 -07005571 if (err) {
Chris Masond1310b22008-01-24 16:13:08 -05005572 ret = err;
Liu Bobaf863b2016-07-11 10:39:07 -07005573 /*
5574 * We use &bio in above __extent_read_full_page,
5575 * so we ensure that if it returns error, the
5576 * current page fails to add itself to bio and
5577 * it's been unlocked.
5578 *
5579 * We must dec io_pages by ourselves.
5580 */
5581 atomic_dec(&eb->io_pages);
5582 }
Chris Masond1310b22008-01-24 16:13:08 -05005583 } else {
5584 unlock_page(page);
5585 }
5586 }
5587
Jeff Mahoney355808c2011-10-03 23:23:14 -04005588 if (bio) {
Mike Christie1f7ad752016-06-05 14:31:51 -05005589 err = submit_one_bio(bio, mirror_num, bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01005590 if (err)
5591 return err;
Jeff Mahoney355808c2011-10-03 23:23:14 -04005592 }
Chris Masona86c12c2008-02-07 10:50:54 -05005593
Arne Jansenbb82ab82011-06-10 14:06:53 +02005594 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05005595 return ret;
Chris Masond3977122009-01-05 21:25:51 -05005596
Josef Bacik8436ea912016-09-02 15:40:03 -04005597 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005598 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005599 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05005600 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05005601 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05005602 }
Chris Masond3977122009-01-05 21:25:51 -05005603
Chris Masond1310b22008-01-24 16:13:08 -05005604 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04005605
5606unlock_exit:
Chris Masond3977122009-01-05 21:25:51 -05005607 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04005608 locked_pages--;
Josef Bacik8436ea912016-09-02 15:40:03 -04005609 page = eb->pages[locked_pages];
5610 unlock_page(page);
Chris Masonce9adaa2008-04-09 16:28:12 -04005611 }
5612 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05005613}
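/*
 * Editorial note (not part of the original file): the wait argument above
 * chooses between a best-effort submission and a blocking read.  With
 * WAIT_NONE only trylock_page() is used and the function returns without
 * waiting for the I/O; with WAIT_COMPLETE it waits on every page and
 * reports -EIO if any of them failed to become uptodate.  A hypothetical
 * readahead-style caller:
 */
#if 0
	/* fire and forget; errors are ignored, the next real read retries */
	read_extent_buffer_pages(eb, WAIT_NONE, 0);
#endif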
Chris Masond1310b22008-01-24 16:13:08 -05005614
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005615void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5616 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05005617{
5618 size_t cur;
5619 size_t offset;
5620 struct page *page;
5621 char *kaddr;
5622 char *dst = (char *)dstv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005623 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005624 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005625
Liu Bof716abd2017-08-09 11:10:16 -06005626 if (start + len > eb->len) {
5627 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5628 eb->start, eb->len, start, len);
5629 memset(dst, 0, len);
5630 return;
5631 }
Chris Masond1310b22008-01-24 16:13:08 -05005632
Johannes Thumshirn70730172018-12-05 15:23:03 +01005633 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005634
Chris Masond3977122009-01-05 21:25:51 -05005635 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005636 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005637
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005638 cur = min(len, (PAGE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04005639 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005640 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005641
5642 dst += cur;
5643 len -= cur;
5644 offset = 0;
5645 i++;
5646 }
5647}
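/*
 * Editorial note (not part of the original file): read_extent_buffer() is
 * the copy-based fallback the on-disk structure accessors rely on when a
 * field straddles a page boundary, since a single kmapped page cannot be
 * read across its end.  A hypothetical field read, with key_offset as a
 * placeholder:
 */
#if 0
	struct btrfs_disk_key disk_key;

	read_extent_buffer(eb, &disk_key, key_offset, sizeof(disk_key));
#endif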
Chris Masond1310b22008-01-24 16:13:08 -05005648
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005649int read_extent_buffer_to_user(const struct extent_buffer *eb,
5650 void __user *dstv,
5651 unsigned long start, unsigned long len)
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005652{
5653 size_t cur;
5654 size_t offset;
5655 struct page *page;
5656 char *kaddr;
5657 char __user *dst = (char __user *)dstv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005658 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005659 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005660 int ret = 0;
5661
5662 WARN_ON(start > eb->len);
5663 WARN_ON(start + len > eb->start + eb->len);
5664
Johannes Thumshirn70730172018-12-05 15:23:03 +01005665 offset = offset_in_page(start_offset + start);
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005666
5667 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005668 page = eb->pages[i];
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005669
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005670 cur = min(len, (PAGE_SIZE - offset));
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005671 kaddr = page_address(page);
5672 if (copy_to_user(dst, kaddr + offset, cur)) {
5673 ret = -EFAULT;
5674 break;
5675 }
5676
5677 dst += cur;
5678 len -= cur;
5679 offset = 0;
5680 i++;
5681 }
5682
5683 return ret;
5684}
5685
Liu Bo415b35a2016-06-17 19:16:21 -07005686/*
5687 * return 0 if the item is found within a page.
5688 * return 1 if the item spans two pages.
5689 * return -EINVAL otherwise.
5690 */
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005691int map_private_extent_buffer(const struct extent_buffer *eb,
5692 unsigned long start, unsigned long min_len,
5693 char **map, unsigned long *map_start,
5694 unsigned long *map_len)
Chris Masond1310b22008-01-24 16:13:08 -05005695{
Johannes Thumshirncc2c39d2018-11-28 09:54:54 +01005696 size_t offset;
Chris Masond1310b22008-01-24 16:13:08 -05005697 char *kaddr;
5698 struct page *p;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005699 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005700 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005701 unsigned long end_i = (start_offset + start + min_len - 1) >>
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005702 PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005703
Liu Bof716abd2017-08-09 11:10:16 -06005704 if (start + min_len > eb->len) {
5705 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5706 eb->start, eb->len, start, min_len);
5707 return -EINVAL;
5708 }
5709
Chris Masond1310b22008-01-24 16:13:08 -05005710 if (i != end_i)
Liu Bo415b35a2016-06-17 19:16:21 -07005711 return 1;
Chris Masond1310b22008-01-24 16:13:08 -05005712
5713 if (i == 0) {
5714 offset = start_offset;
5715 *map_start = 0;
5716 } else {
5717 offset = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005718 *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
Chris Masond1310b22008-01-24 16:13:08 -05005719 }
Chris Masond3977122009-01-05 21:25:51 -05005720
David Sterbafb85fc92014-07-31 01:03:53 +02005721 p = eb->pages[i];
Chris Masona6591712011-07-19 12:04:14 -04005722 kaddr = page_address(p);
Chris Masond1310b22008-01-24 16:13:08 -05005723 *map = kaddr + offset;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005724 *map_len = PAGE_SIZE - offset;
Chris Masond1310b22008-01-24 16:13:08 -05005725 return 0;
5726}
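/*
 * Editorial sketch (not part of the original file): callers use the return
 * value above to choose between a direct pointer into the page and a
 * byte-wise copy, which is how the generic setget helpers deal with fields
 * that may cross a page boundary.  eb and offset are placeholders and
 * error handling is simplified.
 */
#if 0
	char *kaddr;
	unsigned long map_start, map_len;
	u64 val = 0;
	int err;

	err = map_private_extent_buffer(eb, offset, sizeof(u64),
					&kaddr, &map_start, &map_len);
	if (err == 1)		/* spans two pages, copy instead */
		read_extent_buffer(eb, &val, offset, sizeof(u64));
	else if (!err)
		val = get_unaligned_le64(kaddr + offset - map_start);
#endif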
Chris Masond1310b22008-01-24 16:13:08 -05005727
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06005728int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5729 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05005730{
5731 size_t cur;
5732 size_t offset;
5733 struct page *page;
5734 char *kaddr;
5735 char *ptr = (char *)ptrv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005736 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005737 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005738 int ret = 0;
5739
5740 WARN_ON(start > eb->len);
5741 WARN_ON(start + len > eb->start + eb->len);
5742
Johannes Thumshirn70730172018-12-05 15:23:03 +01005743 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005744
Chris Masond3977122009-01-05 21:25:51 -05005745 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005746 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005747
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005748 cur = min(len, (PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05005749
Chris Masona6591712011-07-19 12:04:14 -04005750 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005751 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005752 if (ret)
5753 break;
5754
5755 ptr += cur;
5756 len -= cur;
5757 offset = 0;
5758 i++;
5759 }
5760 return ret;
5761}
Chris Masond1310b22008-01-24 16:13:08 -05005762
David Sterbaf157bf72016-11-09 17:43:38 +01005763void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
5764 const void *srcv)
5765{
5766 char *kaddr;
5767
5768 WARN_ON(!PageUptodate(eb->pages[0]));
5769 kaddr = page_address(eb->pages[0]);
5770 memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5771 BTRFS_FSID_SIZE);
5772}
5773
5774void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
5775{
5776 char *kaddr;
5777
5778 WARN_ON(!PageUptodate(eb->pages[0]));
5779 kaddr = page_address(eb->pages[0]);
5780 memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5781 BTRFS_FSID_SIZE);
5782}
5783
Chris Masond1310b22008-01-24 16:13:08 -05005784void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5785 unsigned long start, unsigned long len)
5786{
5787 size_t cur;
5788 size_t offset;
5789 struct page *page;
5790 char *kaddr;
5791 char *src = (char *)srcv;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005792 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005793 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005794
5795 WARN_ON(start > eb->len);
5796 WARN_ON(start + len > eb->start + eb->len);
5797
Johannes Thumshirn70730172018-12-05 15:23:03 +01005798 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005799
Chris Masond3977122009-01-05 21:25:51 -05005800 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005801 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005802 WARN_ON(!PageUptodate(page));
5803
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005804 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005805 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005806 memcpy(kaddr + offset, src, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005807
5808 src += cur;
5809 len -= cur;
5810 offset = 0;
5811 i++;
5812 }
5813}
Chris Masond1310b22008-01-24 16:13:08 -05005814
David Sterbab159fa22016-11-08 18:09:03 +01005815void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
5816 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05005817{
5818 size_t cur;
5819 size_t offset;
5820 struct page *page;
5821 char *kaddr;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005822 size_t start_offset = offset_in_page(eb->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005823 unsigned long i = (start_offset + start) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005824
5825 WARN_ON(start > eb->len);
5826 WARN_ON(start + len > eb->start + eb->len);
5827
Johannes Thumshirn70730172018-12-05 15:23:03 +01005828 offset = offset_in_page(start_offset + start);
Chris Masond1310b22008-01-24 16:13:08 -05005829
Chris Masond3977122009-01-05 21:25:51 -05005830 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005831 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005832 WARN_ON(!PageUptodate(page));
5833
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005834 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005835 kaddr = page_address(page);
David Sterbab159fa22016-11-08 18:09:03 +01005836 memset(kaddr + offset, 0, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005837
5838 len -= cur;
5839 offset = 0;
5840 i++;
5841 }
5842}
Chris Masond1310b22008-01-24 16:13:08 -05005843
David Sterba58e80122016-11-08 18:30:31 +01005844void copy_extent_buffer_full(struct extent_buffer *dst,
5845 struct extent_buffer *src)
5846{
5847 int i;
David Sterbacc5e31a2018-03-01 18:20:27 +01005848 int num_pages;
David Sterba58e80122016-11-08 18:30:31 +01005849
5850 ASSERT(dst->len == src->len);
5851
David Sterba65ad0102018-06-29 10:56:49 +02005852 num_pages = num_extent_pages(dst);
David Sterba58e80122016-11-08 18:30:31 +01005853 for (i = 0; i < num_pages; i++)
5854 copy_page(page_address(dst->pages[i]),
5855 page_address(src->pages[i]));
5856}
5857
Chris Masond1310b22008-01-24 16:13:08 -05005858void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5859 unsigned long dst_offset, unsigned long src_offset,
5860 unsigned long len)
5861{
5862 u64 dst_len = dst->len;
5863 size_t cur;
5864 size_t offset;
5865 struct page *page;
5866 char *kaddr;
Johannes Thumshirn70730172018-12-05 15:23:03 +01005867 size_t start_offset = offset_in_page(dst->start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005868 unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005869
5870 WARN_ON(src->len != dst_len);
5871
Johannes Thumshirn70730172018-12-05 15:23:03 +01005872 offset = offset_in_page(start_offset + dst_offset);
Chris Masond1310b22008-01-24 16:13:08 -05005873
Chris Masond3977122009-01-05 21:25:51 -05005874 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005875 page = dst->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005876 WARN_ON(!PageUptodate(page));
5877
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005878 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05005879
Chris Masona6591712011-07-19 12:04:14 -04005880 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005881 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005882
5883 src_offset += cur;
5884 len -= cur;
5885 offset = 0;
5886 i++;
5887 }
5888}

/*
 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
 * given bit number
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number
 * @page_index: return index of the page in the extent buffer that contains the
 * given bit number
 * @page_offset: return offset into the page given by page_index
 *
 * This helper hides the ugliness of finding the byte in an extent buffer which
 * contains a given bit.
 */
static inline void eb_bitmap_offset(struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *page_index,
				    size_t *page_offset)
{
	size_t start_offset = offset_in_page(eb->start);
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in the
	 * bitmap item.
	 */
	offset = start_offset + start + byte_offset;

	*page_index = offset >> PAGE_SHIFT;
	*page_offset = offset_in_page(offset);
}
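
/*
 * Worked example with hypothetical numbers (4K pages, page-aligned buffer, so
 * start_offset == 0): for a bitmap item that begins 5000 bytes into the
 * buffer (start == 5000) and bit number nr == 53:
 *
 *	byte_offset  = BIT_BYTE(53) = 53 / 8		= 6
 *	offset       = 0 + 5000 + 6			= 5006
 *	*page_index  = 5006 >> PAGE_SHIFT		= 1
 *	*page_offset = offset_in_page(5006)		= 910  (5006 - 4096)
 *
 * i.e. the byte holding bit 53 lives 910 bytes into the buffer's second page.
 */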

/**
 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number to test
 */
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}
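
/*
 * Worked example (hypothetical byte value): bits are numbered from the least
 * significant bit of each byte.  If the byte found by eb_bitmap_offset() is
 * 0xb4 (binary 10110100) and nr == 53, then nr & 7 == 5, so the function
 * returns 1U & (0xb4 >> 5) == 1U & 0x05 == 1: bit 53 is set.
 */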

/**
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);

	while (len >= bits_to_set) {
		kaddr[offset] |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			WARN_ON(!PageUptodate(page));
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] |= mask_to_set;
	}
}
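
/*
 * Worked example (hypothetical arguments): setting 7 bits starting at bit 3
 * (pos == 3, len == 7, so size == 10) touches two bytes.  On entry
 * bits_to_set == 5 and mask_to_set == BITMAP_FIRST_BYTE_MASK(3) == 0xf8, so
 * the first pass ORs in 0xf8 (bits 3-7 of the first byte) and leaves
 * len == 2.  The loop then exits and the tail ORs in
 * 0xff & BITMAP_LAST_BYTE_MASK(10) == 0x03 (bits 0-1 of the next byte).
 * extent_buffer_bitmap_clear() below mirrors this with AND-NOT instead of OR.
 */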

/**
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);

	while (len >= bits_to_clear) {
		kaddr[offset] &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			WARN_ON(!PageUptodate(page));
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] &= ~mask_to_clear;
	}
}
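
/*
 * Illustrative sketch (hypothetical caller): the three bitmap helpers above
 * are meant to be used together on a bitmap item stored inside an extent
 * buffer, e.g. marking a run of entries used and later releasing one of them.
 * The helper name, item offset and bit numbers are made-up example values.
 */
static void example_bitmap_usage(struct extent_buffer *leaf,
				 unsigned long bitmap_offset)
{
	/* mark bits 16..47 of the bitmap item as set */
	extent_buffer_bitmap_set(leaf, bitmap_offset, 16, 32);

	/* ...later, clear a single bit again */
	extent_buffer_bitmap_clear(leaf, bitmap_offset, 20, 1);

	/* bit 21 is still set, bit 20 no longer is */
	WARN_ON(!extent_buffer_test_bit(leaf, bitmap_offset, 21));
	WARN_ON(extent_buffer_test_bit(leaf, bitmap_offset, 20));
}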

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}
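
/*
 * Worked example (hypothetical offsets): with len == 60, src == 100 and
 * dst == 150 the distance is 50 < 60, so the ranges [100, 160) and [150, 210)
 * overlap and copy_pages() below must fall back to memmove().  With
 * dst == 200 the distance is 100 >= 60 and a plain memcpy() is safe.
 */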

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;
	int must_memmove = 0;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		if (areas_overlap(src_off, dst_off, len))
			must_memmove = 1;
	}

	if (must_memmove)
		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
	else
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = offset_in_page(dst->start);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		btrfs_err(fs_info,
			"memmove bogus src_offset %lu move len %lu dst len %lu",
			 src_offset, len, dst->len);
		BUG();
	}
	if (dst_offset + len > dst->len) {
		btrfs_err(fs_info,
			"memmove bogus dst_offset %lu move len %lu dst len %lu",
			 dst_offset, len, dst->len);
		BUG();
	}

	while (len > 0) {
		dst_off_in_page = offset_in_page(start_offset + dst_offset);
		src_off_in_page = offset_in_page(start_offset + src_offset);

		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_SIZE - dst_off_in_page));

		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
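
/*
 * Worked example (hypothetical offsets, 4K pages, page-aligned buffer): with
 * src_offset == 100, dst_offset == 4090 and len == 20, the first iteration is
 * limited by the destination page:
 *
 *	cur = min(20, 4096 - 100, 4096 - 4090) = 6
 *
 * Six bytes are copied into the tail of page 0, then the loop continues with
 * dst_offset == 4096 (page 1, offset 0) and len == 14.
 */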

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = offset_in_page(dst->start);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		btrfs_err(fs_info,
			  "memmove bogus src_offset %lu move len %lu len %lu",
			  src_offset, len, dst->len);
		BUG();
	}
	if (dst_offset + len > dst->len) {
		btrfs_err(fs_info,
			  "memmove bogus dst_offset %lu move len %lu len %lu",
			  dst_offset, len, dst->len);
		BUG();
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_SHIFT;

		dst_off_in_page = offset_in_page(start_offset + dst_end);
		src_off_in_page = offset_in_page(start_offset + src_end);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
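
/*
 * Illustrative sketch (hypothetical caller): memmove_extent_buffer() is the
 * right helper when source and destination ranges may overlap inside the same
 * buffer, e.g. when making room for new data by shifting existing bytes
 * towards the end.  Because the destination sits above the source here, the
 * helper copies the chunks back-to-front so the overlap cannot corrupt data.
 * The helper name below is made up for the example.
 */
static void example_shift_right(struct extent_buffer *eb, unsigned long offset,
				unsigned long nbytes, unsigned long shift)
{
	memmove_extent_buffer(eb, offset + shift, offset, nbytes);
	/* scrub the bytes vacated at the front of the moved range */
	memzero_extent_buffer(eb, offset, shift);
}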

int try_release_extent_buffer(struct page *page)
{
	struct extent_buffer *eb;

	/*
	 * We need to make sure nobody is attaching this page to an eb right
	 * now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}
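
/*
 * Illustrative sketch (assumption: loosely modeled on the btree address space
 * ->releasepage callback in disk-io.c, not copied from it): memory reclaim
 * calls the hook when it wants to detach page->private, and
 * try_release_extent_buffer() answers whether the extent buffer backing the
 * page can really be dropped.
 */
static int example_btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}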