blob: 360d997c722632fe73b5b4aab55698ecab9bebe5 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
David Sterbac1d7c512018-04-03 19:23:33 +02002
Chris Masond1310b22008-01-24 16:13:08 -05003#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
Chris Masond1310b22008-01-24 16:13:08 -05007#include <linux/pagemap.h>
8#include <linux/page-flags.h>
Chris Masond1310b22008-01-24 16:13:08 -05009#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
Chris Masond1310b22008-01-24 16:13:08 -050012#include <linux/writeback.h>
13#include <linux/pagevec.h>
Linus Torvalds268bb0c2011-05-20 12:50:29 -070014#include <linux/prefetch.h>
Dan Magenheimer90a887c2011-05-26 10:01:56 -060015#include <linux/cleancache.h>
Johannes Thumshirncea62802021-03-16 19:04:01 +090016#include "misc.h"
Chris Masond1310b22008-01-24 16:13:08 -050017#include "extent_io.h"
Josef Bacik9c7d3a52019-09-23 10:05:19 -040018#include "extent-io-tree.h"
Chris Masond1310b22008-01-24 16:13:08 -050019#include "extent_map.h"
David Woodhouse902b22f2008-08-20 08:51:49 -040020#include "ctree.h"
21#include "btrfs_inode.h"
Jan Schmidt4a54c8c2011-07-22 15:41:52 +020022#include "volumes.h"
Stefan Behrens21adbd52011-11-09 13:44:05 +010023#include "check-integrity.h"
Josef Bacik0b32f4b2012-03-13 09:38:00 -040024#include "locking.h"
Josef Bacik606686e2012-06-04 14:03:51 -040025#include "rcu-string.h"
Liu Bofe09e162013-09-22 12:54:23 +080026#include "backref.h"
David Sterba6af49db2017-06-23 04:09:57 +020027#include "disk-io.h"
Qu Wenruo760f9912021-01-26 16:33:48 +080028#include "subpage.h"
Naohiro Aotad35751562021-02-04 19:21:54 +090029#include "zoned.h"
Naohiro Aota0bc09ca2021-02-04 19:22:08 +090030#include "block-group.h"
Chris Masond1310b22008-01-24 16:13:08 -050031
Chris Masond1310b22008-01-24 16:13:08 -050032static struct kmem_cache *extent_state_cache;
33static struct kmem_cache *extent_buffer_cache;
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -040034static struct bio_set btrfs_bioset;
Chris Masond1310b22008-01-24 16:13:08 -050035
Filipe Manana27a35072014-07-06 20:09:59 +010036static inline bool extent_state_in_tree(const struct extent_state *state)
37{
38 return !RB_EMPTY_NODE(&state->rb_node);
39}
40
Eric Sandeen6d49ba12013-04-22 16:12:31 +000041#ifdef CONFIG_BTRFS_DEBUG
Chris Masond1310b22008-01-24 16:13:08 -050042static LIST_HEAD(states);
Chris Masond3977122009-01-05 21:25:51 -050043static DEFINE_SPINLOCK(leak_lock);
Eric Sandeen6d49ba12013-04-22 16:12:31 +000044
Josef Bacik3fd63722020-02-14 16:11:40 -050045static inline void btrfs_leak_debug_add(spinlock_t *lock,
46 struct list_head *new,
47 struct list_head *head)
Eric Sandeen6d49ba12013-04-22 16:12:31 +000048{
49 unsigned long flags;
50
Josef Bacik3fd63722020-02-14 16:11:40 -050051 spin_lock_irqsave(lock, flags);
Eric Sandeen6d49ba12013-04-22 16:12:31 +000052 list_add(new, head);
Josef Bacik3fd63722020-02-14 16:11:40 -050053 spin_unlock_irqrestore(lock, flags);
Eric Sandeen6d49ba12013-04-22 16:12:31 +000054}
55
Josef Bacik3fd63722020-02-14 16:11:40 -050056static inline void btrfs_leak_debug_del(spinlock_t *lock,
57 struct list_head *entry)
Eric Sandeen6d49ba12013-04-22 16:12:31 +000058{
59 unsigned long flags;
60
Josef Bacik3fd63722020-02-14 16:11:40 -050061 spin_lock_irqsave(lock, flags);
Eric Sandeen6d49ba12013-04-22 16:12:31 +000062 list_del(entry);
Josef Bacik3fd63722020-02-14 16:11:40 -050063 spin_unlock_irqrestore(lock, flags);
Eric Sandeen6d49ba12013-04-22 16:12:31 +000064}
65
Josef Bacik3fd63722020-02-14 16:11:40 -050066void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
Josef Bacik33ca8322019-09-23 10:05:17 -040067{
68 struct extent_buffer *eb;
Josef Bacik3fd63722020-02-14 16:11:40 -050069 unsigned long flags;
Josef Bacik33ca8322019-09-23 10:05:17 -040070
Josef Bacik8c389382020-02-14 16:11:42 -050071 /*
72 * If we didn't get into open_ctree our allocated_ebs will not be
73 * initialized, so just skip this.
74 */
75 if (!fs_info->allocated_ebs.next)
76 return;
77
Josef Bacik3fd63722020-02-14 16:11:40 -050078 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
79 while (!list_empty(&fs_info->allocated_ebs)) {
80 eb = list_first_entry(&fs_info->allocated_ebs,
81 struct extent_buffer, leak_list);
Josef Bacik8c389382020-02-14 16:11:42 -050082 pr_err(
83 "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
84 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
85 btrfs_header_owner(eb));
Josef Bacik33ca8322019-09-23 10:05:17 -040086 list_del(&eb->leak_list);
87 kmem_cache_free(extent_buffer_cache, eb);
88 }
Josef Bacik3fd63722020-02-14 16:11:40 -050089 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
Josef Bacik33ca8322019-09-23 10:05:17 -040090}
91
92static inline void btrfs_extent_state_leak_debug_check(void)
Eric Sandeen6d49ba12013-04-22 16:12:31 +000093{
94 struct extent_state *state;
Eric Sandeen6d49ba12013-04-22 16:12:31 +000095
96 while (!list_empty(&states)) {
97 state = list_entry(states.next, struct extent_state, leak_list);
David Sterba9ee49a042015-01-14 19:52:13 +010098 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
Filipe Manana27a35072014-07-06 20:09:59 +010099 state->start, state->end, state->state,
100 extent_state_in_tree(state),
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200101 refcount_read(&state->refs));
Eric Sandeen6d49ba12013-04-22 16:12:31 +0000102 list_del(&state->leak_list);
103 kmem_cache_free(extent_state_cache, state);
104 }
Eric Sandeen6d49ba12013-04-22 16:12:31 +0000105}
David Sterba8d599ae2013-04-30 15:22:23 +0000106
Josef Bacika5dee372013-12-13 10:02:44 -0500107#define btrfs_debug_check_extent_io_range(tree, start, end) \
108 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
David Sterba8d599ae2013-04-30 15:22:23 +0000109static inline void __btrfs_debug_check_extent_io_range(const char *caller,
Josef Bacika5dee372013-12-13 10:02:44 -0500110 struct extent_io_tree *tree, u64 start, u64 end)
David Sterba8d599ae2013-04-30 15:22:23 +0000111{
Nikolay Borisov65a680f2018-11-01 14:09:49 +0200112 struct inode *inode = tree->private_data;
113 u64 isize;
114
115 if (!inode || !is_data_inode(inode))
116 return;
117
118 isize = i_size_read(inode);
119 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
120 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
121 "%s: ino %llu isize %llu odd range [%llu,%llu]",
122 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
123 }
David Sterba8d599ae2013-04-30 15:22:23 +0000124}
Eric Sandeen6d49ba12013-04-22 16:12:31 +0000125#else
Josef Bacik3fd63722020-02-14 16:11:40 -0500126#define btrfs_leak_debug_add(lock, new, head) do {} while (0)
127#define btrfs_leak_debug_del(lock, entry) do {} while (0)
Josef Bacik33ca8322019-09-23 10:05:17 -0400128#define btrfs_extent_state_leak_debug_check() do {} while (0)
David Sterba8d599ae2013-04-30 15:22:23 +0000129#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
Chris Mason4bef0842008-09-08 11:18:08 -0400130#endif
Chris Masond1310b22008-01-24 16:13:08 -0500131
Chris Masond1310b22008-01-24 16:13:08 -0500132struct tree_entry {
133 u64 start;
134 u64 end;
Chris Masond1310b22008-01-24 16:13:08 -0500135 struct rb_node rb_node;
136};
137
138struct extent_page_data {
139 struct bio *bio;
Chris Mason771ed682008-11-06 22:02:51 -0500140 /* tells writepage not to lock the state bits for this range
141 * it still does the unlocking
142 */
Chris Masonffbd5172009-04-20 15:50:09 -0400143 unsigned int extent_locked:1;
144
Christoph Hellwig70fd7612016-11-01 07:40:10 -0600145 /* tells the submit_bio code to use REQ_SYNC */
Chris Masonffbd5172009-04-20 15:50:09 -0400146 unsigned int sync_io:1;
Chris Masond1310b22008-01-24 16:13:08 -0500147};
148
Qu Wenruof97e27e2020-11-13 20:51:40 +0800149static int add_extent_changeset(struct extent_state *state, u32 bits,
Qu Wenruod38ed272015-10-12 14:53:37 +0800150 struct extent_changeset *changeset,
151 int set)
152{
153 int ret;
154
155 if (!changeset)
David Sterba57599c72018-03-01 17:56:34 +0100156 return 0;
Qu Wenruod38ed272015-10-12 14:53:37 +0800157 if (set && (state->state & bits) == bits)
David Sterba57599c72018-03-01 17:56:34 +0100158 return 0;
Qu Wenruofefdc552015-10-12 15:35:38 +0800159 if (!set && (state->state & bits) == 0)
David Sterba57599c72018-03-01 17:56:34 +0100160 return 0;
Qu Wenruod38ed272015-10-12 14:53:37 +0800161 changeset->bytes_changed += state->end - state->start + 1;
David Sterba53d32352017-02-13 13:42:29 +0100162 ret = ulist_add(&changeset->range_changed, state->start, state->end,
Qu Wenruod38ed272015-10-12 14:53:37 +0800163 GFP_ATOMIC);
David Sterba57599c72018-03-01 17:56:34 +0100164 return ret;
Qu Wenruod38ed272015-10-12 14:53:37 +0800165}
166
Nikolay Borisovc1be9c12020-09-14 12:37:08 +0300167int __must_check submit_one_bio(struct bio *bio, int mirror_num,
168 unsigned long bio_flags)
Qu Wenruobb58eb92019-01-25 13:09:15 +0800169{
170 blk_status_t ret = 0;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800171 struct extent_io_tree *tree = bio->bi_private;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800172
173 bio->bi_private = NULL;
174
Nikolay Borisov908930f2020-09-18 16:34:37 +0300175 if (is_data_inode(tree->private_data))
176 ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
177 bio_flags);
178 else
Nikolay Borisov1b362942020-09-18 16:34:38 +0300179 ret = btrfs_submit_metadata_bio(tree->private_data, bio,
180 mirror_num, bio_flags);
Qu Wenruobb58eb92019-01-25 13:09:15 +0800181
182 return blk_status_to_errno(ret);
183}
184
Qu Wenruo30659762019-03-20 14:27:42 +0800185/* Cleanup unsubmitted bios */
186static void end_write_bio(struct extent_page_data *epd, int ret)
187{
188 if (epd->bio) {
189 epd->bio->bi_status = errno_to_blk_status(ret);
190 bio_endio(epd->bio);
191 epd->bio = NULL;
192 }
193}
194
Qu Wenruof4340622019-03-20 14:27:41 +0800195/*
196 * Submit bio from extent page data via submit_one_bio
197 *
198 * Return 0 if everything is OK.
199 * Return <0 for error.
200 */
201static int __must_check flush_write_bio(struct extent_page_data *epd)
Qu Wenruobb58eb92019-01-25 13:09:15 +0800202{
Qu Wenruof4340622019-03-20 14:27:41 +0800203 int ret = 0;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800204
Qu Wenruof4340622019-03-20 14:27:41 +0800205 if (epd->bio) {
Qu Wenruobb58eb92019-01-25 13:09:15 +0800206 ret = submit_one_bio(epd->bio, 0, 0);
Qu Wenruof4340622019-03-20 14:27:41 +0800207 /*
208 * Clean up of epd->bio is handled by its endio function.
209 * And endio is either triggered by successful bio execution
210 * or the error handler of submit bio hook.
211 * So at this point, no matter what happened, we don't need
212 * to clean up epd->bio.
213 */
Qu Wenruobb58eb92019-01-25 13:09:15 +0800214 epd->bio = NULL;
215 }
Qu Wenruof4340622019-03-20 14:27:41 +0800216 return ret;
Qu Wenruobb58eb92019-01-25 13:09:15 +0800217}
David Sterbae2932ee2017-06-23 04:16:17 +0200218
Josef Bacik6f0d04f2019-09-23 10:05:18 -0400219int __init extent_state_cache_init(void)
Chris Masond1310b22008-01-24 16:13:08 -0500220{
David Sterba837e1972012-09-07 03:00:48 -0600221 extent_state_cache = kmem_cache_create("btrfs_extent_state",
Christoph Hellwig9601e3f2009-04-13 15:33:09 +0200222 sizeof(struct extent_state), 0,
Nikolay Borisovfba4b692016-06-23 21:17:08 +0300223 SLAB_MEM_SPREAD, NULL);
Chris Masond1310b22008-01-24 16:13:08 -0500224 if (!extent_state_cache)
225 return -ENOMEM;
Josef Bacik6f0d04f2019-09-23 10:05:18 -0400226 return 0;
227}
Chris Masond1310b22008-01-24 16:13:08 -0500228
Josef Bacik6f0d04f2019-09-23 10:05:18 -0400229int __init extent_io_init(void)
230{
David Sterba837e1972012-09-07 03:00:48 -0600231 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
Christoph Hellwig9601e3f2009-04-13 15:33:09 +0200232 sizeof(struct extent_buffer), 0,
Nikolay Borisovfba4b692016-06-23 21:17:08 +0300233 SLAB_MEM_SPREAD, NULL);
Chris Masond1310b22008-01-24 16:13:08 -0500234 if (!extent_buffer_cache)
Josef Bacik6f0d04f2019-09-23 10:05:18 -0400235 return -ENOMEM;
Chris Mason9be33952013-05-17 18:30:14 -0400236
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400237 if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
238 offsetof(struct btrfs_io_bio, bio),
239 BIOSET_NEED_BVECS))
Chris Mason9be33952013-05-17 18:30:14 -0400240 goto free_buffer_cache;
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700241
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400242 if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700243 goto free_bioset;
244
Chris Masond1310b22008-01-24 16:13:08 -0500245 return 0;
246
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700247free_bioset:
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400248 bioset_exit(&btrfs_bioset);
Darrick J. Wongb208c2f2013-09-19 20:37:07 -0700249
Chris Mason9be33952013-05-17 18:30:14 -0400250free_buffer_cache:
251 kmem_cache_destroy(extent_buffer_cache);
252 extent_buffer_cache = NULL;
Chris Masond1310b22008-01-24 16:13:08 -0500253 return -ENOMEM;
254}
255
Josef Bacik6f0d04f2019-09-23 10:05:18 -0400256void __cold extent_state_cache_exit(void)
257{
258 btrfs_extent_state_leak_debug_check();
259 kmem_cache_destroy(extent_state_cache);
260}
261
David Sterbae67c7182018-02-19 17:24:18 +0100262void __cold extent_io_exit(void)
Chris Masond1310b22008-01-24 16:13:08 -0500263{
Kirill A. Shutemov8c0a8532012-09-26 11:33:07 +1000264 /*
265 * Make sure all delayed rcu free are flushed before we
266 * destroy caches.
267 */
268 rcu_barrier();
Kinglong Mee5598e902016-01-29 21:36:35 +0800269 kmem_cache_destroy(extent_buffer_cache);
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -0400270 bioset_exit(&btrfs_bioset);
Chris Masond1310b22008-01-24 16:13:08 -0500271}
272
Josef Bacik41a2ee72020-01-17 09:02:21 -0500273/*
274 * For the file_extent_tree, we want to hold the inode lock when we lookup and
275 * update the disk_i_size, but lockdep will complain because our io_tree we hold
276 * the tree lock and get the inode lock when setting delalloc. These two things
277 * are unrelated, so make a class for the file_extent_tree so we don't get the
278 * two locking patterns mixed up.
279 */
280static struct lock_class_key file_extent_tree_class;
281
Qu Wenruoc258d6e2019-03-01 10:47:58 +0800282void extent_io_tree_init(struct btrfs_fs_info *fs_info,
Qu Wenruo43eb5f22019-03-01 10:47:59 +0800283 struct extent_io_tree *tree, unsigned int owner,
284 void *private_data)
Chris Masond1310b22008-01-24 16:13:08 -0500285{
Qu Wenruoc258d6e2019-03-01 10:47:58 +0800286 tree->fs_info = fs_info;
Eric Paris6bef4d32010-02-23 19:43:04 +0000287 tree->state = RB_ROOT;
Chris Masond1310b22008-01-24 16:13:08 -0500288 tree->dirty_bytes = 0;
Chris Mason70dec802008-01-29 09:59:12 -0500289 spin_lock_init(&tree->lock);
Josef Bacikc6100a42017-05-05 11:57:13 -0400290 tree->private_data = private_data;
Qu Wenruo43eb5f22019-03-01 10:47:59 +0800291 tree->owner = owner;
Josef Bacik41a2ee72020-01-17 09:02:21 -0500292 if (owner == IO_TREE_INODE_FILE_EXTENT)
293 lockdep_set_class(&tree->lock, &file_extent_tree_class);
Chris Masond1310b22008-01-24 16:13:08 -0500294}
Chris Masond1310b22008-01-24 16:13:08 -0500295
Nikolay Borisov41e7acd2019-03-25 14:31:24 +0200296void extent_io_tree_release(struct extent_io_tree *tree)
297{
298 spin_lock(&tree->lock);
299 /*
300 * Do a single barrier for the waitqueue_active check here, the state
301 * of the waitqueue should not change once extent_io_tree_release is
302 * called.
303 */
304 smp_mb();
305 while (!RB_EMPTY_ROOT(&tree->state)) {
306 struct rb_node *node;
307 struct extent_state *state;
308
309 node = rb_first(&tree->state);
310 state = rb_entry(node, struct extent_state, rb_node);
311 rb_erase(&state->rb_node, &tree->state);
312 RB_CLEAR_NODE(&state->rb_node);
313 /*
314 * btree io trees aren't supposed to have tasks waiting for
315 * changes in the flags of extent states ever.
316 */
317 ASSERT(!waitqueue_active(&state->wq));
318 free_extent_state(state);
319
320 cond_resched_lock(&tree->lock);
321 }
322 spin_unlock(&tree->lock);
323}
324
Christoph Hellwigb2950862008-12-02 09:54:17 -0500325static struct extent_state *alloc_extent_state(gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -0500326{
327 struct extent_state *state;
Chris Masond1310b22008-01-24 16:13:08 -0500328
Michal Hocko3ba7ab22017-01-09 15:39:02 +0100329 /*
330 * The given mask might be not appropriate for the slab allocator,
331 * drop the unsupported bits
332 */
333 mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
Chris Masond1310b22008-01-24 16:13:08 -0500334 state = kmem_cache_alloc(extent_state_cache, mask);
Peter2b114d12008-04-01 11:21:40 -0400335 if (!state)
Chris Masond1310b22008-01-24 16:13:08 -0500336 return state;
337 state->state = 0;
David Sterba47dc1962016-02-11 13:24:13 +0100338 state->failrec = NULL;
Filipe Manana27a35072014-07-06 20:09:59 +0100339 RB_CLEAR_NODE(&state->rb_node);
Josef Bacik3fd63722020-02-14 16:11:40 -0500340 btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200341 refcount_set(&state->refs, 1);
Chris Masond1310b22008-01-24 16:13:08 -0500342 init_waitqueue_head(&state->wq);
Jeff Mahoney143bede2012-03-01 14:56:26 +0100343 trace_alloc_extent_state(state, mask, _RET_IP_);
Chris Masond1310b22008-01-24 16:13:08 -0500344 return state;
345}
Chris Masond1310b22008-01-24 16:13:08 -0500346
Chris Mason4845e442010-05-25 20:56:50 -0400347void free_extent_state(struct extent_state *state)
Chris Masond1310b22008-01-24 16:13:08 -0500348{
Chris Masond1310b22008-01-24 16:13:08 -0500349 if (!state)
350 return;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200351 if (refcount_dec_and_test(&state->refs)) {
Filipe Manana27a35072014-07-06 20:09:59 +0100352 WARN_ON(extent_state_in_tree(state));
Josef Bacik3fd63722020-02-14 16:11:40 -0500353 btrfs_leak_debug_del(&leak_lock, &state->leak_list);
Jeff Mahoney143bede2012-03-01 14:56:26 +0100354 trace_free_extent_state(state, _RET_IP_);
Chris Masond1310b22008-01-24 16:13:08 -0500355 kmem_cache_free(extent_state_cache, state);
356 }
357}
Chris Masond1310b22008-01-24 16:13:08 -0500358
Filipe Mananaf2071b22014-02-12 15:05:53 +0000359static struct rb_node *tree_insert(struct rb_root *root,
360 struct rb_node *search_start,
361 u64 offset,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000362 struct rb_node *node,
363 struct rb_node ***p_in,
364 struct rb_node **parent_in)
Chris Masond1310b22008-01-24 16:13:08 -0500365{
Filipe Mananaf2071b22014-02-12 15:05:53 +0000366 struct rb_node **p;
Chris Masond3977122009-01-05 21:25:51 -0500367 struct rb_node *parent = NULL;
Chris Masond1310b22008-01-24 16:13:08 -0500368 struct tree_entry *entry;
369
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000370 if (p_in && parent_in) {
371 p = *p_in;
372 parent = *parent_in;
373 goto do_insert;
374 }
375
Filipe Mananaf2071b22014-02-12 15:05:53 +0000376 p = search_start ? &search_start : &root->rb_node;
Chris Masond3977122009-01-05 21:25:51 -0500377 while (*p) {
Chris Masond1310b22008-01-24 16:13:08 -0500378 parent = *p;
379 entry = rb_entry(parent, struct tree_entry, rb_node);
380
381 if (offset < entry->start)
382 p = &(*p)->rb_left;
383 else if (offset > entry->end)
384 p = &(*p)->rb_right;
385 else
386 return parent;
387 }
388
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000389do_insert:
Chris Masond1310b22008-01-24 16:13:08 -0500390 rb_link_node(node, parent, p);
391 rb_insert_color(node, root);
392 return NULL;
393}
394
Nikolay Borisov8666e632019-06-05 14:50:04 +0300395/**
Nikolay Borisov3bed2da2021-01-22 11:58:03 +0200396 * Search @tree for an entry that contains @offset. Such entry would have
397 * entry->start <= offset && entry->end >= offset.
Nikolay Borisov8666e632019-06-05 14:50:04 +0300398 *
Nikolay Borisov3bed2da2021-01-22 11:58:03 +0200399 * @tree: the tree to search
400 * @offset: offset that should fall within an entry in @tree
401 * @next_ret: pointer to the first entry whose range ends after @offset
402 * @prev_ret: pointer to the first entry whose range begins before @offset
403 * @p_ret: pointer where new node should be anchored (used when inserting an
404 * entry in the tree)
405 * @parent_ret: points to entry which would have been the parent of the entry,
Nikolay Borisov8666e632019-06-05 14:50:04 +0300406 * containing @offset
407 *
408 * This function returns a pointer to the entry that contains @offset byte
409 * address. If no such entry exists, then NULL is returned and the other
410 * pointer arguments to the function are filled, otherwise the found entry is
411 * returned and other pointers are left untouched.
412 */
Chris Mason80ea96b2008-02-01 14:51:59 -0500413static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000414 struct rb_node **next_ret,
Nikolay Borisov352646c2019-01-30 16:51:00 +0200415 struct rb_node **prev_ret,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000416 struct rb_node ***p_ret,
417 struct rb_node **parent_ret)
Chris Masond1310b22008-01-24 16:13:08 -0500418{
Chris Mason80ea96b2008-02-01 14:51:59 -0500419 struct rb_root *root = &tree->state;
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000420 struct rb_node **n = &root->rb_node;
Chris Masond1310b22008-01-24 16:13:08 -0500421 struct rb_node *prev = NULL;
422 struct rb_node *orig_prev = NULL;
423 struct tree_entry *entry;
424 struct tree_entry *prev_entry = NULL;
425
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000426 while (*n) {
427 prev = *n;
428 entry = rb_entry(prev, struct tree_entry, rb_node);
Chris Masond1310b22008-01-24 16:13:08 -0500429 prev_entry = entry;
430
431 if (offset < entry->start)
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000432 n = &(*n)->rb_left;
Chris Masond1310b22008-01-24 16:13:08 -0500433 else if (offset > entry->end)
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000434 n = &(*n)->rb_right;
Chris Masond3977122009-01-05 21:25:51 -0500435 else
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000436 return *n;
Chris Masond1310b22008-01-24 16:13:08 -0500437 }
438
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000439 if (p_ret)
440 *p_ret = n;
441 if (parent_ret)
442 *parent_ret = prev;
443
Nikolay Borisov352646c2019-01-30 16:51:00 +0200444 if (next_ret) {
Chris Masond1310b22008-01-24 16:13:08 -0500445 orig_prev = prev;
Chris Masond3977122009-01-05 21:25:51 -0500446 while (prev && offset > prev_entry->end) {
Chris Masond1310b22008-01-24 16:13:08 -0500447 prev = rb_next(prev);
448 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
449 }
Nikolay Borisov352646c2019-01-30 16:51:00 +0200450 *next_ret = prev;
Chris Masond1310b22008-01-24 16:13:08 -0500451 prev = orig_prev;
452 }
453
Nikolay Borisov352646c2019-01-30 16:51:00 +0200454 if (prev_ret) {
Chris Masond1310b22008-01-24 16:13:08 -0500455 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
Chris Masond3977122009-01-05 21:25:51 -0500456 while (prev && offset < prev_entry->start) {
Chris Masond1310b22008-01-24 16:13:08 -0500457 prev = rb_prev(prev);
458 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
459 }
Nikolay Borisov352646c2019-01-30 16:51:00 +0200460 *prev_ret = prev;
Chris Masond1310b22008-01-24 16:13:08 -0500461 }
462 return NULL;
463}
464
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000465static inline struct rb_node *
466tree_search_for_insert(struct extent_io_tree *tree,
467 u64 offset,
468 struct rb_node ***p_ret,
469 struct rb_node **parent_ret)
Chris Masond1310b22008-01-24 16:13:08 -0500470{
Nikolay Borisov352646c2019-01-30 16:51:00 +0200471 struct rb_node *next= NULL;
Chris Masond1310b22008-01-24 16:13:08 -0500472 struct rb_node *ret;
Chris Mason70dec802008-01-29 09:59:12 -0500473
Nikolay Borisov352646c2019-01-30 16:51:00 +0200474 ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
Chris Masond3977122009-01-05 21:25:51 -0500475 if (!ret)
Nikolay Borisov352646c2019-01-30 16:51:00 +0200476 return next;
Chris Masond1310b22008-01-24 16:13:08 -0500477 return ret;
478}
479
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000480static inline struct rb_node *tree_search(struct extent_io_tree *tree,
481 u64 offset)
482{
483 return tree_search_for_insert(tree, offset, NULL, NULL);
484}
485
Chris Masond1310b22008-01-24 16:13:08 -0500486/*
487 * utility function to look for merge candidates inside a given range.
488 * Any extents with matching state are merged together into a single
489 * extent in the tree. Extents with EXTENT_IO in their state field
490 * are not merged because the end_io handlers need to be able to do
491 * operations on them without sleeping (or doing allocations/splits).
492 *
493 * This should be called with the tree lock held.
494 */
Jeff Mahoney1bf85042011-07-21 16:56:09 +0000495static void merge_state(struct extent_io_tree *tree,
496 struct extent_state *state)
Chris Masond1310b22008-01-24 16:13:08 -0500497{
498 struct extent_state *other;
499 struct rb_node *other_node;
500
Nikolay Borisov88826792019-03-14 15:28:31 +0200501 if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
Jeff Mahoney1bf85042011-07-21 16:56:09 +0000502 return;
Chris Masond1310b22008-01-24 16:13:08 -0500503
504 other_node = rb_prev(&state->rb_node);
505 if (other_node) {
506 other = rb_entry(other_node, struct extent_state, rb_node);
507 if (other->end == state->start - 1 &&
508 other->state == state->state) {
Nikolay Borisov5c848192018-11-01 14:09:52 +0200509 if (tree->private_data &&
510 is_data_inode(tree->private_data))
511 btrfs_merge_delalloc_extent(tree->private_data,
512 state, other);
Chris Masond1310b22008-01-24 16:13:08 -0500513 state->start = other->start;
Chris Masond1310b22008-01-24 16:13:08 -0500514 rb_erase(&other->rb_node, &tree->state);
Filipe Manana27a35072014-07-06 20:09:59 +0100515 RB_CLEAR_NODE(&other->rb_node);
Chris Masond1310b22008-01-24 16:13:08 -0500516 free_extent_state(other);
517 }
518 }
519 other_node = rb_next(&state->rb_node);
520 if (other_node) {
521 other = rb_entry(other_node, struct extent_state, rb_node);
522 if (other->start == state->end + 1 &&
523 other->state == state->state) {
Nikolay Borisov5c848192018-11-01 14:09:52 +0200524 if (tree->private_data &&
525 is_data_inode(tree->private_data))
526 btrfs_merge_delalloc_extent(tree->private_data,
527 state, other);
Josef Bacikdf98b6e2011-06-20 14:53:48 -0400528 state->end = other->end;
Josef Bacikdf98b6e2011-06-20 14:53:48 -0400529 rb_erase(&other->rb_node, &tree->state);
Filipe Manana27a35072014-07-06 20:09:59 +0100530 RB_CLEAR_NODE(&other->rb_node);
Josef Bacikdf98b6e2011-06-20 14:53:48 -0400531 free_extent_state(other);
Chris Masond1310b22008-01-24 16:13:08 -0500532 }
533 }
Chris Masond1310b22008-01-24 16:13:08 -0500534}
535
Xiao Guangrong3150b692011-07-14 03:19:08 +0000536static void set_state_bits(struct extent_io_tree *tree,
Qu Wenruof97e27e2020-11-13 20:51:40 +0800537 struct extent_state *state, u32 *bits,
Qu Wenruod38ed272015-10-12 14:53:37 +0800538 struct extent_changeset *changeset);
Xiao Guangrong3150b692011-07-14 03:19:08 +0000539
Chris Masond1310b22008-01-24 16:13:08 -0500540/*
541 * insert an extent_state struct into the tree. 'bits' are set on the
542 * struct before it is inserted.
543 *
544 * This may return -EEXIST if the extent is already there, in which case the
545 * state struct is freed.
546 *
547 * The tree lock is not taken internally. This is a utility function and
548 * probably isn't what you want to call (see set/clear_extent_bit).
549 */
550static int insert_state(struct extent_io_tree *tree,
551 struct extent_state *state, u64 start, u64 end,
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000552 struct rb_node ***p,
553 struct rb_node **parent,
Qu Wenruof97e27e2020-11-13 20:51:40 +0800554 u32 *bits, struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500555{
556 struct rb_node *node;
557
David Sterba27922372019-06-18 20:00:05 +0200558 if (end < start) {
559 btrfs_err(tree->fs_info,
560 "insert state: end < start %llu %llu", end, start);
561 WARN_ON(1);
562 }
Chris Masond1310b22008-01-24 16:13:08 -0500563 state->start = start;
564 state->end = end;
Josef Bacik9ed74f22009-09-11 16:12:44 -0400565
Qu Wenruod38ed272015-10-12 14:53:37 +0800566 set_state_bits(tree, state, bits, changeset);
Xiao Guangrong3150b692011-07-14 03:19:08 +0000567
Filipe Mananaf2071b22014-02-12 15:05:53 +0000568 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
Chris Masond1310b22008-01-24 16:13:08 -0500569 if (node) {
570 struct extent_state *found;
571 found = rb_entry(node, struct extent_state, rb_node);
David Sterba27922372019-06-18 20:00:05 +0200572 btrfs_err(tree->fs_info,
573 "found node %llu %llu on insert of %llu %llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +0200574 found->start, found->end, start, end);
Chris Masond1310b22008-01-24 16:13:08 -0500575 return -EEXIST;
576 }
577 merge_state(tree, state);
578 return 0;
579}
580
581/*
582 * split a given extent state struct in two, inserting the preallocated
583 * struct 'prealloc' as the newly created second half. 'split' indicates an
584 * offset inside 'orig' where it should be split.
585 *
586 * Before calling,
587 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
588 * are two extent state structs in the tree:
589 * prealloc: [orig->start, split - 1]
590 * orig: [ split, orig->end ]
591 *
592 * The tree locks are not taken by this function. They need to be held
593 * by the caller.
594 */
595static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
596 struct extent_state *prealloc, u64 split)
597{
598 struct rb_node *node;
Josef Bacik9ed74f22009-09-11 16:12:44 -0400599
Nikolay Borisovabbb55f2018-11-01 14:09:53 +0200600 if (tree->private_data && is_data_inode(tree->private_data))
601 btrfs_split_delalloc_extent(tree->private_data, orig, split);
Josef Bacik9ed74f22009-09-11 16:12:44 -0400602
Chris Masond1310b22008-01-24 16:13:08 -0500603 prealloc->start = orig->start;
604 prealloc->end = split - 1;
605 prealloc->state = orig->state;
606 orig->start = split;
607
Filipe Mananaf2071b22014-02-12 15:05:53 +0000608 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
609 &prealloc->rb_node, NULL, NULL);
Chris Masond1310b22008-01-24 16:13:08 -0500610 if (node) {
Chris Masond1310b22008-01-24 16:13:08 -0500611 free_extent_state(prealloc);
612 return -EEXIST;
613 }
614 return 0;
615}
616
Li Zefancdc6a392012-03-12 16:39:48 +0800617static struct extent_state *next_state(struct extent_state *state)
618{
619 struct rb_node *next = rb_next(&state->rb_node);
620 if (next)
621 return rb_entry(next, struct extent_state, rb_node);
622 else
623 return NULL;
624}
625
Chris Masond1310b22008-01-24 16:13:08 -0500626/*
627 * utility function to clear some bits in an extent state struct.
Andrea Gelmini52042d82018-11-28 12:05:13 +0100628 * it will optionally wake up anyone waiting on this state (wake == 1).
Chris Masond1310b22008-01-24 16:13:08 -0500629 *
630 * If no bits are set on the state struct after clearing things, the
631 * struct is freed and removed from the tree
632 */
Li Zefancdc6a392012-03-12 16:39:48 +0800633static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
634 struct extent_state *state,
Qu Wenruof97e27e2020-11-13 20:51:40 +0800635 u32 *bits, int wake,
Qu Wenruofefdc552015-10-12 15:35:38 +0800636 struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500637{
Li Zefancdc6a392012-03-12 16:39:48 +0800638 struct extent_state *next;
Qu Wenruof97e27e2020-11-13 20:51:40 +0800639 u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
David Sterba57599c72018-03-01 17:56:34 +0100640 int ret;
Chris Masond1310b22008-01-24 16:13:08 -0500641
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400642 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
Chris Masond1310b22008-01-24 16:13:08 -0500643 u64 range = state->end - state->start + 1;
644 WARN_ON(range > tree->dirty_bytes);
645 tree->dirty_bytes -= range;
646 }
Nikolay Borisova36bb5f2018-11-01 14:09:51 +0200647
648 if (tree->private_data && is_data_inode(tree->private_data))
649 btrfs_clear_delalloc_extent(tree->private_data, state, bits);
650
David Sterba57599c72018-03-01 17:56:34 +0100651 ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
652 BUG_ON(ret < 0);
Josef Bacik32c00af2009-10-08 13:34:05 -0400653 state->state &= ~bits_to_clear;
Chris Masond1310b22008-01-24 16:13:08 -0500654 if (wake)
655 wake_up(&state->wq);
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400656 if (state->state == 0) {
Li Zefancdc6a392012-03-12 16:39:48 +0800657 next = next_state(state);
Filipe Manana27a35072014-07-06 20:09:59 +0100658 if (extent_state_in_tree(state)) {
Chris Masond1310b22008-01-24 16:13:08 -0500659 rb_erase(&state->rb_node, &tree->state);
Filipe Manana27a35072014-07-06 20:09:59 +0100660 RB_CLEAR_NODE(&state->rb_node);
Chris Masond1310b22008-01-24 16:13:08 -0500661 free_extent_state(state);
662 } else {
663 WARN_ON(1);
664 }
665 } else {
666 merge_state(tree, state);
Li Zefancdc6a392012-03-12 16:39:48 +0800667 next = next_state(state);
Chris Masond1310b22008-01-24 16:13:08 -0500668 }
Li Zefancdc6a392012-03-12 16:39:48 +0800669 return next;
Chris Masond1310b22008-01-24 16:13:08 -0500670}
671
Xiao Guangrong82337672011-04-20 06:44:57 +0000672static struct extent_state *
673alloc_extent_state_atomic(struct extent_state *prealloc)
674{
675 if (!prealloc)
676 prealloc = alloc_extent_state(GFP_ATOMIC);
677
678 return prealloc;
679}
680
Eric Sandeen48a3b632013-04-25 20:41:01 +0000681static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400682{
Su Yue29b665c2021-01-03 17:28:03 +0800683 btrfs_panic(tree->fs_info, err,
David Sterba05912a32018-07-18 19:23:45 +0200684 "locking error: extent tree was modified by another thread while locked");
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400685}
686
Chris Masond1310b22008-01-24 16:13:08 -0500687/*
688 * clear some bits on a range in the tree. This may require splitting
689 * or inserting elements in the tree, so the gfp mask is used to
690 * indicate which allocations or sleeping are allowed.
691 *
692 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
693 * the given range from the tree regardless of state (ie for truncate).
694 *
695 * the range [start, end] is inclusive.
696 *
Jeff Mahoney6763af82012-03-01 14:56:29 +0100697 * This takes the tree lock, and returns 0 on success and < 0 on error.
Chris Masond1310b22008-01-24 16:13:08 -0500698 */
David Sterba66b0c882017-10-31 16:30:47 +0100699int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +0800700 u32 bits, int wake, int delete,
701 struct extent_state **cached_state,
702 gfp_t mask, struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500703{
704 struct extent_state *state;
Chris Mason2c64c532009-09-02 15:04:12 -0400705 struct extent_state *cached;
Chris Masond1310b22008-01-24 16:13:08 -0500706 struct extent_state *prealloc = NULL;
707 struct rb_node *node;
Yan Zheng5c939df2009-05-27 09:16:03 -0400708 u64 last_end;
Chris Masond1310b22008-01-24 16:13:08 -0500709 int err;
Josef Bacik2ac55d42010-02-03 19:33:23 +0000710 int clear = 0;
Chris Masond1310b22008-01-24 16:13:08 -0500711
Josef Bacika5dee372013-12-13 10:02:44 -0500712 btrfs_debug_check_extent_io_range(tree, start, end);
Qu Wenruoa1d19842019-03-01 10:48:00 +0800713 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
David Sterba8d599ae2013-04-30 15:22:23 +0000714
Josef Bacik7ee9e442013-06-21 16:37:03 -0400715 if (bits & EXTENT_DELALLOC)
716 bits |= EXTENT_NORESERVE;
717
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400718 if (delete)
719 bits |= ~EXTENT_CTLBITS;
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400720
Nikolay Borisov88826792019-03-14 15:28:31 +0200721 if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
Josef Bacik2ac55d42010-02-03 19:33:23 +0000722 clear = 1;
Chris Masond1310b22008-01-24 16:13:08 -0500723again:
Mel Gormand0164ad2015-11-06 16:28:21 -0800724 if (!prealloc && gfpflags_allow_blocking(mask)) {
Filipe Mananac7bc6312014-11-03 14:12:57 +0000725 /*
726 * Don't care for allocation failure here because we might end
727 * up not needing the pre-allocated extent state at all, which
728 * is the case if we only have in the tree extent states that
729 * cover our input range and don't cover too any other range.
730 * If we end up needing a new extent state we allocate it later.
731 */
Chris Masond1310b22008-01-24 16:13:08 -0500732 prealloc = alloc_extent_state(mask);
Chris Masond1310b22008-01-24 16:13:08 -0500733 }
734
Chris Masoncad321a2008-12-17 14:51:42 -0500735 spin_lock(&tree->lock);
Chris Mason2c64c532009-09-02 15:04:12 -0400736 if (cached_state) {
737 cached = *cached_state;
Josef Bacik2ac55d42010-02-03 19:33:23 +0000738
739 if (clear) {
740 *cached_state = NULL;
741 cached_state = NULL;
742 }
743
Filipe Manana27a35072014-07-06 20:09:59 +0100744 if (cached && extent_state_in_tree(cached) &&
745 cached->start <= start && cached->end > start) {
Josef Bacik2ac55d42010-02-03 19:33:23 +0000746 if (clear)
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200747 refcount_dec(&cached->refs);
Chris Mason2c64c532009-09-02 15:04:12 -0400748 state = cached;
Chris Mason42daec22009-09-23 19:51:09 -0400749 goto hit_next;
Chris Mason2c64c532009-09-02 15:04:12 -0400750 }
Josef Bacik2ac55d42010-02-03 19:33:23 +0000751 if (clear)
752 free_extent_state(cached);
Chris Mason2c64c532009-09-02 15:04:12 -0400753 }
Chris Masond1310b22008-01-24 16:13:08 -0500754 /*
755 * this search will find the extents that end after
756 * our range starts
757 */
Chris Mason80ea96b2008-02-01 14:51:59 -0500758 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -0500759 if (!node)
760 goto out;
761 state = rb_entry(node, struct extent_state, rb_node);
Chris Mason2c64c532009-09-02 15:04:12 -0400762hit_next:
Chris Masond1310b22008-01-24 16:13:08 -0500763 if (state->start > end)
764 goto out;
765 WARN_ON(state->end < start);
Yan Zheng5c939df2009-05-27 09:16:03 -0400766 last_end = state->end;
Chris Masond1310b22008-01-24 16:13:08 -0500767
Liu Bo04493142012-02-16 18:34:37 +0800768 /* the state doesn't have the wanted bits, go ahead */
Li Zefancdc6a392012-03-12 16:39:48 +0800769 if (!(state->state & bits)) {
770 state = next_state(state);
Liu Bo04493142012-02-16 18:34:37 +0800771 goto next;
Li Zefancdc6a392012-03-12 16:39:48 +0800772 }
Liu Bo04493142012-02-16 18:34:37 +0800773
Chris Masond1310b22008-01-24 16:13:08 -0500774 /*
775 * | ---- desired range ---- |
776 * | state | or
777 * | ------------- state -------------- |
778 *
779 * We need to split the extent we found, and may flip
780 * bits on second half.
781 *
782 * If the extent we found extends past our range, we
783 * just split and search again. It'll get split again
784 * the next time though.
785 *
786 * If the extent we found is inside our range, we clear
787 * the desired bit on it.
788 */
789
790 if (state->start < start) {
Xiao Guangrong82337672011-04-20 06:44:57 +0000791 prealloc = alloc_extent_state_atomic(prealloc);
792 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -0500793 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400794 if (err)
795 extent_io_tree_panic(tree, err);
796
Chris Masond1310b22008-01-24 16:13:08 -0500797 prealloc = NULL;
798 if (err)
799 goto out;
800 if (state->end <= end) {
Qu Wenruofefdc552015-10-12 15:35:38 +0800801 state = clear_state_bit(tree, state, &bits, wake,
802 changeset);
Liu Bod1ac6e42012-05-10 18:10:39 +0800803 goto next;
Chris Masond1310b22008-01-24 16:13:08 -0500804 }
805 goto search_again;
806 }
807 /*
808 * | ---- desired range ---- |
809 * | state |
810 * We need to split the extent, and clear the bit
811 * on the first half
812 */
813 if (state->start <= end && state->end > end) {
Xiao Guangrong82337672011-04-20 06:44:57 +0000814 prealloc = alloc_extent_state_atomic(prealloc);
815 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -0500816 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -0400817 if (err)
818 extent_io_tree_panic(tree, err);
819
Chris Masond1310b22008-01-24 16:13:08 -0500820 if (wake)
821 wake_up(&state->wq);
Chris Mason42daec22009-09-23 19:51:09 -0400822
Qu Wenruofefdc552015-10-12 15:35:38 +0800823 clear_state_bit(tree, prealloc, &bits, wake, changeset);
Josef Bacik9ed74f22009-09-11 16:12:44 -0400824
Chris Masond1310b22008-01-24 16:13:08 -0500825 prealloc = NULL;
826 goto out;
827 }
Chris Mason42daec22009-09-23 19:51:09 -0400828
Qu Wenruofefdc552015-10-12 15:35:38 +0800829 state = clear_state_bit(tree, state, &bits, wake, changeset);
Liu Bo04493142012-02-16 18:34:37 +0800830next:
Yan Zheng5c939df2009-05-27 09:16:03 -0400831 if (last_end == (u64)-1)
832 goto out;
833 start = last_end + 1;
Li Zefancdc6a392012-03-12 16:39:48 +0800834 if (start <= end && state && !need_resched())
Liu Bo692e5752012-02-16 18:34:36 +0800835 goto hit_next;
Chris Masond1310b22008-01-24 16:13:08 -0500836
837search_again:
838 if (start > end)
839 goto out;
Chris Masoncad321a2008-12-17 14:51:42 -0500840 spin_unlock(&tree->lock);
Mel Gormand0164ad2015-11-06 16:28:21 -0800841 if (gfpflags_allow_blocking(mask))
Chris Masond1310b22008-01-24 16:13:08 -0500842 cond_resched();
843 goto again;
David Sterba7ab5cb22016-04-27 01:02:15 +0200844
845out:
846 spin_unlock(&tree->lock);
847 if (prealloc)
848 free_extent_state(prealloc);
849
850 return 0;
851
Chris Masond1310b22008-01-24 16:13:08 -0500852}
Chris Masond1310b22008-01-24 16:13:08 -0500853
Jeff Mahoney143bede2012-03-01 14:56:26 +0100854static void wait_on_state(struct extent_io_tree *tree,
855 struct extent_state *state)
Christoph Hellwig641f5212008-12-02 06:36:10 -0500856 __releases(tree->lock)
857 __acquires(tree->lock)
Chris Masond1310b22008-01-24 16:13:08 -0500858{
859 DEFINE_WAIT(wait);
860 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
Chris Masoncad321a2008-12-17 14:51:42 -0500861 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500862 schedule();
Chris Masoncad321a2008-12-17 14:51:42 -0500863 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500864 finish_wait(&state->wq, &wait);
Chris Masond1310b22008-01-24 16:13:08 -0500865}
866
867/*
868 * waits for one or more bits to clear on a range in the state tree.
869 * The range [start, end] is inclusive.
870 * The tree lock is taken by this function
871 */
David Sterba41074882013-04-29 13:38:46 +0000872static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +0800873 u32 bits)
Chris Masond1310b22008-01-24 16:13:08 -0500874{
875 struct extent_state *state;
876 struct rb_node *node;
877
Josef Bacika5dee372013-12-13 10:02:44 -0500878 btrfs_debug_check_extent_io_range(tree, start, end);
David Sterba8d599ae2013-04-30 15:22:23 +0000879
Chris Masoncad321a2008-12-17 14:51:42 -0500880 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500881again:
882 while (1) {
883 /*
884 * this search will find all the extents that end after
885 * our range starts
886 */
Chris Mason80ea96b2008-02-01 14:51:59 -0500887 node = tree_search(tree, start);
Filipe Mananac50d3e72014-03-31 14:53:25 +0100888process_node:
Chris Masond1310b22008-01-24 16:13:08 -0500889 if (!node)
890 break;
891
892 state = rb_entry(node, struct extent_state, rb_node);
893
894 if (state->start > end)
895 goto out;
896
897 if (state->state & bits) {
898 start = state->start;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200899 refcount_inc(&state->refs);
Chris Masond1310b22008-01-24 16:13:08 -0500900 wait_on_state(tree, state);
901 free_extent_state(state);
902 goto again;
903 }
904 start = state->end + 1;
905
906 if (start > end)
907 break;
908
Filipe Mananac50d3e72014-03-31 14:53:25 +0100909 if (!cond_resched_lock(&tree->lock)) {
910 node = rb_next(node);
911 goto process_node;
912 }
Chris Masond1310b22008-01-24 16:13:08 -0500913 }
914out:
Chris Masoncad321a2008-12-17 14:51:42 -0500915 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -0500916}
Chris Masond1310b22008-01-24 16:13:08 -0500917
Jeff Mahoney1bf85042011-07-21 16:56:09 +0000918static void set_state_bits(struct extent_io_tree *tree,
Chris Masond1310b22008-01-24 16:13:08 -0500919 struct extent_state *state,
Qu Wenruof97e27e2020-11-13 20:51:40 +0800920 u32 *bits, struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500921{
Qu Wenruof97e27e2020-11-13 20:51:40 +0800922 u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
David Sterba57599c72018-03-01 17:56:34 +0100923 int ret;
Josef Bacik9ed74f22009-09-11 16:12:44 -0400924
Nikolay Borisove06a1fc2018-11-01 14:09:50 +0200925 if (tree->private_data && is_data_inode(tree->private_data))
926 btrfs_set_delalloc_extent(tree->private_data, state, bits);
927
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400928 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
Chris Masond1310b22008-01-24 16:13:08 -0500929 u64 range = state->end - state->start + 1;
930 tree->dirty_bytes += range;
931 }
David Sterba57599c72018-03-01 17:56:34 +0100932 ret = add_extent_changeset(state, bits_to_set, changeset, 1);
933 BUG_ON(ret < 0);
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -0400934 state->state |= bits_to_set;
Chris Masond1310b22008-01-24 16:13:08 -0500935}
936
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100937static void cache_state_if_flags(struct extent_state *state,
938 struct extent_state **cached_ptr,
David Sterba9ee49a042015-01-14 19:52:13 +0100939 unsigned flags)
Chris Mason2c64c532009-09-02 15:04:12 -0400940{
941 if (cached_ptr && !(*cached_ptr)) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100942 if (!flags || (state->state & flags)) {
Chris Mason2c64c532009-09-02 15:04:12 -0400943 *cached_ptr = state;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +0200944 refcount_inc(&state->refs);
Chris Mason2c64c532009-09-02 15:04:12 -0400945 }
946 }
947}
948
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100949static void cache_state(struct extent_state *state,
950 struct extent_state **cached_ptr)
951{
952 return cache_state_if_flags(state, cached_ptr,
Nikolay Borisov88826792019-03-14 15:28:31 +0200953 EXTENT_LOCKED | EXTENT_BOUNDARY);
Filipe Mananae38e2ed2014-10-13 12:28:38 +0100954}
955
Chris Masond1310b22008-01-24 16:13:08 -0500956/*
Chris Mason1edbb732009-09-02 13:24:36 -0400957 * set some bits on a range in the tree. This may require allocations or
958 * sleeping, so the gfp mask is used to indicate what is allowed.
Chris Masond1310b22008-01-24 16:13:08 -0500959 *
Chris Mason1edbb732009-09-02 13:24:36 -0400960 * If any of the exclusive bits are set, this will fail with -EEXIST if some
961 * part of the range already has the desired bits set. The start of the
962 * existing range is returned in failed_start in this case.
Chris Masond1310b22008-01-24 16:13:08 -0500963 *
Chris Mason1edbb732009-09-02 13:24:36 -0400964 * [start, end] is inclusive This takes the tree lock.
Chris Masond1310b22008-01-24 16:13:08 -0500965 */
Qu Wenruof97e27e2020-11-13 20:51:40 +0800966int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
967 u32 exclusive_bits, u64 *failed_start,
Nikolay Borisov1cab5e72020-11-05 11:08:00 +0200968 struct extent_state **cached_state, gfp_t mask,
969 struct extent_changeset *changeset)
Chris Masond1310b22008-01-24 16:13:08 -0500970{
971 struct extent_state *state;
972 struct extent_state *prealloc = NULL;
973 struct rb_node *node;
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +0000974 struct rb_node **p;
975 struct rb_node *parent;
Chris Masond1310b22008-01-24 16:13:08 -0500976 int err = 0;
Chris Masond1310b22008-01-24 16:13:08 -0500977 u64 last_start;
978 u64 last_end;
Chris Mason42daec22009-09-23 19:51:09 -0400979
Josef Bacika5dee372013-12-13 10:02:44 -0500980 btrfs_debug_check_extent_io_range(tree, start, end);
Qu Wenruoa1d19842019-03-01 10:48:00 +0800981 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
David Sterba8d599ae2013-04-30 15:22:23 +0000982
Qu Wenruo3f6bb4a2020-10-21 14:24:51 +0800983 if (exclusive_bits)
984 ASSERT(failed_start);
985 else
986 ASSERT(failed_start == NULL);
Chris Masond1310b22008-01-24 16:13:08 -0500987again:
Mel Gormand0164ad2015-11-06 16:28:21 -0800988 if (!prealloc && gfpflags_allow_blocking(mask)) {
David Sterba059f7912016-04-27 01:03:45 +0200989 /*
990 * Don't care for allocation failure here because we might end
991 * up not needing the pre-allocated extent state at all, which
992 * is the case if we only have in the tree extent states that
993 * cover our input range and don't cover too any other range.
994 * If we end up needing a new extent state we allocate it later.
995 */
Chris Masond1310b22008-01-24 16:13:08 -0500996 prealloc = alloc_extent_state(mask);
Chris Masond1310b22008-01-24 16:13:08 -0500997 }
998
Chris Masoncad321a2008-12-17 14:51:42 -0500999 spin_lock(&tree->lock);
Chris Mason9655d292009-09-02 15:22:30 -04001000 if (cached_state && *cached_state) {
1001 state = *cached_state;
Josef Bacikdf98b6e2011-06-20 14:53:48 -04001002 if (state->start <= start && state->end > start &&
Filipe Manana27a35072014-07-06 20:09:59 +01001003 extent_state_in_tree(state)) {
Chris Mason9655d292009-09-02 15:22:30 -04001004 node = &state->rb_node;
1005 goto hit_next;
1006 }
1007 }
Chris Masond1310b22008-01-24 16:13:08 -05001008 /*
1009 * this search will find all the extents that end after
1010 * our range starts.
1011 */
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001012 node = tree_search_for_insert(tree, start, &p, &parent);
Chris Masond1310b22008-01-24 16:13:08 -05001013 if (!node) {
Xiao Guangrong82337672011-04-20 06:44:57 +00001014 prealloc = alloc_extent_state_atomic(prealloc);
1015 BUG_ON(!prealloc);
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001016 err = insert_state(tree, prealloc, start, end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001017 &p, &parent, &bits, changeset);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001018 if (err)
1019 extent_io_tree_panic(tree, err);
1020
Filipe David Borba Mananac42ac0b2013-11-26 15:01:34 +00001021 cache_state(prealloc, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001022 prealloc = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05001023 goto out;
1024 }
Chris Masond1310b22008-01-24 16:13:08 -05001025 state = rb_entry(node, struct extent_state, rb_node);
Chris Mason40431d62009-08-05 12:57:59 -04001026hit_next:
Chris Masond1310b22008-01-24 16:13:08 -05001027 last_start = state->start;
1028 last_end = state->end;
1029
1030 /*
1031 * | ---- desired range ---- |
1032 * | state |
1033 *
1034 * Just lock what we found and keep going
1035 */
1036 if (state->start == start && state->end <= end) {
Chris Mason1edbb732009-09-02 13:24:36 -04001037 if (state->state & exclusive_bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001038 *failed_start = state->start;
1039 err = -EEXIST;
1040 goto out;
1041 }
Chris Mason42daec22009-09-23 19:51:09 -04001042
Qu Wenruod38ed272015-10-12 14:53:37 +08001043 set_state_bits(tree, state, &bits, changeset);
Chris Mason2c64c532009-09-02 15:04:12 -04001044 cache_state(state, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001045 merge_state(tree, state);
Yan Zheng5c939df2009-05-27 09:16:03 -04001046 if (last_end == (u64)-1)
1047 goto out;
1048 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001049 state = next_state(state);
1050 if (start < end && state && state->start == start &&
1051 !need_resched())
1052 goto hit_next;
Chris Masond1310b22008-01-24 16:13:08 -05001053 goto search_again;
1054 }
1055
1056 /*
1057 * | ---- desired range ---- |
1058 * | state |
1059 * or
1060 * | ------------- state -------------- |
1061 *
1062 * We need to split the extent we found, and may flip bits on
1063 * second half.
1064 *
1065 * If the extent we found extends past our
1066 * range, we just split and search again. It'll get split
1067 * again the next time though.
1068 *
1069 * If the extent we found is inside our range, we set the
1070 * desired bit on it.
1071 */
1072 if (state->start < start) {
Chris Mason1edbb732009-09-02 13:24:36 -04001073 if (state->state & exclusive_bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001074 *failed_start = start;
1075 err = -EEXIST;
1076 goto out;
1077 }
Xiao Guangrong82337672011-04-20 06:44:57 +00001078
Filipe Manana55ffaab2020-02-13 10:20:02 +00001079 /*
1080 * If this extent already has all the bits we want set, then
1081 * skip it, not necessary to split it or do anything with it.
1082 */
1083 if ((state->state & bits) == bits) {
1084 start = state->end + 1;
1085 cache_state(state, cached_state);
1086 goto search_again;
1087 }
1088
Xiao Guangrong82337672011-04-20 06:44:57 +00001089 prealloc = alloc_extent_state_atomic(prealloc);
1090 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -05001091 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001092 if (err)
1093 extent_io_tree_panic(tree, err);
1094
Chris Masond1310b22008-01-24 16:13:08 -05001095 prealloc = NULL;
1096 if (err)
1097 goto out;
1098 if (state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001099 set_state_bits(tree, state, &bits, changeset);
Chris Mason2c64c532009-09-02 15:04:12 -04001100 cache_state(state, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001101 merge_state(tree, state);
Yan Zheng5c939df2009-05-27 09:16:03 -04001102 if (last_end == (u64)-1)
1103 goto out;
1104 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001105 state = next_state(state);
1106 if (start < end && state && state->start == start &&
1107 !need_resched())
1108 goto hit_next;
Chris Masond1310b22008-01-24 16:13:08 -05001109 }
1110 goto search_again;
1111 }
1112 /*
1113 * | ---- desired range ---- |
1114 * | state | or | state |
1115 *
1116 * There's a hole, we need to insert something in it and
1117 * ignore the extent we found.
1118 */
1119 if (state->start > start) {
1120 u64 this_end;
1121 if (end < last_start)
1122 this_end = end;
1123 else
Chris Masond3977122009-01-05 21:25:51 -05001124 this_end = last_start - 1;
Xiao Guangrong82337672011-04-20 06:44:57 +00001125
1126 prealloc = alloc_extent_state_atomic(prealloc);
1127 BUG_ON(!prealloc);
Xiao Guangrongc7f895a2011-04-20 06:45:49 +00001128
1129 /*
1130 * Avoid to free 'prealloc' if it can be merged with
1131 * the later extent.
1132 */
Chris Masond1310b22008-01-24 16:13:08 -05001133 err = insert_state(tree, prealloc, start, this_end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001134 NULL, NULL, &bits, changeset);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001135 if (err)
1136 extent_io_tree_panic(tree, err);
1137
Chris Mason2c64c532009-09-02 15:04:12 -04001138 cache_state(prealloc, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001139 prealloc = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05001140 start = this_end + 1;
1141 goto search_again;
1142 }
1143 /*
1144 * | ---- desired range ---- |
1145 * | state |
1146 * We need to split the extent, and set the bit
1147 * on the first half
1148 */
1149 if (state->start <= end && state->end > end) {
Chris Mason1edbb732009-09-02 13:24:36 -04001150 if (state->state & exclusive_bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001151 *failed_start = start;
1152 err = -EEXIST;
1153 goto out;
1154 }
Xiao Guangrong82337672011-04-20 06:44:57 +00001155
1156 prealloc = alloc_extent_state_atomic(prealloc);
1157 BUG_ON(!prealloc);
Chris Masond1310b22008-01-24 16:13:08 -05001158 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001159 if (err)
1160 extent_io_tree_panic(tree, err);
Chris Masond1310b22008-01-24 16:13:08 -05001161
Qu Wenruod38ed272015-10-12 14:53:37 +08001162 set_state_bits(tree, prealloc, &bits, changeset);
Chris Mason2c64c532009-09-02 15:04:12 -04001163 cache_state(prealloc, cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05001164 merge_state(tree, prealloc);
1165 prealloc = NULL;
1166 goto out;
1167 }
1168
David Sterbab5a4ba142016-04-27 01:02:15 +02001169search_again:
1170 if (start > end)
1171 goto out;
1172 spin_unlock(&tree->lock);
1173 if (gfpflags_allow_blocking(mask))
1174 cond_resched();
1175 goto again;
Chris Masond1310b22008-01-24 16:13:08 -05001176
1177out:
Chris Masoncad321a2008-12-17 14:51:42 -05001178 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001179 if (prealloc)
1180 free_extent_state(prealloc);
1181
1182 return err;
1183
Chris Masond1310b22008-01-24 16:13:08 -05001184}
Chris Masond1310b22008-01-24 16:13:08 -05001185
Josef Bacik462d6fa2011-09-26 13:56:12 -04001186/**
Liu Bo10983f22012-07-11 15:26:19 +08001187 * convert_extent_bit - convert all bits in a given range from one bit to
1188 * another
Josef Bacik462d6fa2011-09-26 13:56:12 -04001189 * @tree: the io tree to search
1190 * @start: the start offset in bytes
1191 * @end: the end offset in bytes (inclusive)
1192 * @bits: the bits to set in this range
1193 * @clear_bits: the bits to clear in this range
Josef Bacike6138872012-09-27 17:07:30 -04001194 * @cached_state: state that we're going to cache
Josef Bacik462d6fa2011-09-26 13:56:12 -04001195 *
1196 * This will go through and set bits for the given range. If any states exist
1197 * already in this range they are set with the given bit and cleared of the
1198 * clear_bits. This is only meant to be used by things that are mergeable, ie
1199 * converting from say DELALLOC to DIRTY. This is not meant to be used with
1200 * boundary bits like LOCK.
David Sterba210aa272016-04-26 23:54:39 +02001201 *
1202 * All allocations are done with GFP_NOFS.
Josef Bacik462d6fa2011-09-26 13:56:12 -04001203 */
1204int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001205 u32 bits, u32 clear_bits,
David Sterba210aa272016-04-26 23:54:39 +02001206 struct extent_state **cached_state)
Josef Bacik462d6fa2011-09-26 13:56:12 -04001207{
1208 struct extent_state *state;
1209 struct extent_state *prealloc = NULL;
1210 struct rb_node *node;
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001211 struct rb_node **p;
1212 struct rb_node *parent;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001213 int err = 0;
1214 u64 last_start;
1215 u64 last_end;
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001216 bool first_iteration = true;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001217
Josef Bacika5dee372013-12-13 10:02:44 -05001218 btrfs_debug_check_extent_io_range(tree, start, end);
Qu Wenruoa1d19842019-03-01 10:48:00 +08001219 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1220 clear_bits);
David Sterba8d599ae2013-04-30 15:22:23 +00001221
Josef Bacik462d6fa2011-09-26 13:56:12 -04001222again:
David Sterba210aa272016-04-26 23:54:39 +02001223 if (!prealloc) {
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001224 /*
1225 * Best effort, don't worry if extent state allocation fails
1226 * here for the first iteration. We might have a cached state
1227 * that matches exactly the target range, in which case no
1228 * extent state allocations are needed. We'll only know this
1229 * after locking the tree.
1230 */
David Sterba210aa272016-04-26 23:54:39 +02001231 prealloc = alloc_extent_state(GFP_NOFS);
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001232 if (!prealloc && !first_iteration)
Josef Bacik462d6fa2011-09-26 13:56:12 -04001233 return -ENOMEM;
1234 }
1235
1236 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001237 if (cached_state && *cached_state) {
1238 state = *cached_state;
1239 if (state->start <= start && state->end > start &&
Filipe Manana27a35072014-07-06 20:09:59 +01001240 extent_state_in_tree(state)) {
Josef Bacike6138872012-09-27 17:07:30 -04001241 node = &state->rb_node;
1242 goto hit_next;
1243 }
1244 }
1245
Josef Bacik462d6fa2011-09-26 13:56:12 -04001246 /*
1247 * this search will find all the extents that end after
1248 * our range starts.
1249 */
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001250 node = tree_search_for_insert(tree, start, &p, &parent);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001251 if (!node) {
1252 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001253 if (!prealloc) {
1254 err = -ENOMEM;
1255 goto out;
1256 }
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001257 err = insert_state(tree, prealloc, start, end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001258 &p, &parent, &bits, NULL);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001259 if (err)
1260 extent_io_tree_panic(tree, err);
Filipe David Borba Mananac42ac0b2013-11-26 15:01:34 +00001261 cache_state(prealloc, cached_state);
1262 prealloc = NULL;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001263 goto out;
1264 }
1265 state = rb_entry(node, struct extent_state, rb_node);
1266hit_next:
1267 last_start = state->start;
1268 last_end = state->end;
1269
1270 /*
1271 * | ---- desired range ---- |
1272 * | state |
1273 *
1274 * Just lock what we found and keep going
1275 */
1276 if (state->start == start && state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001277 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001278 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001279 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001280 if (last_end == (u64)-1)
1281 goto out;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001282 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001283 if (start < end && state && state->start == start &&
1284 !need_resched())
1285 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001286 goto search_again;
1287 }
1288
1289 /*
1290 * | ---- desired range ---- |
1291 * | state |
1292 * or
1293 * | ------------- state -------------- |
1294 *
1295 * We need to split the extent we found, and may flip bits on
1296 * second half.
1297 *
1298 * If the extent we found extends past our
1299 * range, we just split and search again. It'll get split
1300 * again the next time though.
1301 *
1302 * If the extent we found is inside our range, we set the
1303 * desired bit on it.
1304 */
1305 if (state->start < start) {
1306 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001307 if (!prealloc) {
1308 err = -ENOMEM;
1309 goto out;
1310 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001311 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001312 if (err)
1313 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001314 prealloc = NULL;
1315 if (err)
1316 goto out;
1317 if (state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001318 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001319 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001320 state = clear_state_bit(tree, state, &clear_bits, 0,
1321 NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001322 if (last_end == (u64)-1)
1323 goto out;
1324 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001325 if (start < end && state && state->start == start &&
1326 !need_resched())
1327 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001328 }
1329 goto search_again;
1330 }
1331 /*
1332 * | ---- desired range ---- |
1333 * | state | or | state |
1334 *
1335 * There's a hole, we need to insert something in it and
1336 * ignore the extent we found.
1337 */
1338 if (state->start > start) {
1339 u64 this_end;
1340 if (end < last_start)
1341 this_end = end;
1342 else
1343 this_end = last_start - 1;
1344
1345 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001346 if (!prealloc) {
1347 err = -ENOMEM;
1348 goto out;
1349 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001350
1351 /*
1352 * Avoid to free 'prealloc' if it can be merged with
1353 * the later extent.
1354 */
1355 err = insert_state(tree, prealloc, start, this_end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001356 NULL, NULL, &bits, NULL);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001357 if (err)
1358 extent_io_tree_panic(tree, err);
Josef Bacike6138872012-09-27 17:07:30 -04001359 cache_state(prealloc, cached_state);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001360 prealloc = NULL;
1361 start = this_end + 1;
1362 goto search_again;
1363 }
1364 /*
1365 * | ---- desired range ---- |
1366 * | state |
1367 * We need to split the extent, and set the bit
1368 * on the first half
1369 */
1370 if (state->start <= end && state->end > end) {
1371 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001372 if (!prealloc) {
1373 err = -ENOMEM;
1374 goto out;
1375 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001376
1377 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001378 if (err)
1379 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001380
Qu Wenruod38ed272015-10-12 14:53:37 +08001381 set_state_bits(tree, prealloc, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001382 cache_state(prealloc, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001383 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001384 prealloc = NULL;
1385 goto out;
1386 }
1387
Josef Bacik462d6fa2011-09-26 13:56:12 -04001388search_again:
1389 if (start > end)
1390 goto out;
1391 spin_unlock(&tree->lock);
David Sterba210aa272016-04-26 23:54:39 +02001392 cond_resched();
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001393 first_iteration = false;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001394 goto again;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001395
1396out:
1397 spin_unlock(&tree->lock);
1398 if (prealloc)
1399 free_extent_state(prealloc);
1400
1401 return err;
1402}
1403
Chris Masond1310b22008-01-24 16:13:08 -05001404/* wrappers around set/clear extent bit */
Qu Wenruod38ed272015-10-12 14:53:37 +08001405int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001406 u32 bits, struct extent_changeset *changeset)
Qu Wenruod38ed272015-10-12 14:53:37 +08001407{
1408 /*
1409 * We don't support EXTENT_LOCKED yet, as current changeset will
1410 * record any bits changed, so for EXTENT_LOCKED case, it will
1411 * either fail with -EEXIST or changeset will record the whole
1412 * range.
1413 */
1414 BUG_ON(bits & EXTENT_LOCKED);
1415
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001416 return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1417 changeset);
Qu Wenruod38ed272015-10-12 14:53:37 +08001418}
1419
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001420int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001421 u32 bits)
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001422{
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001423 return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1424 GFP_NOWAIT, NULL);
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001425}
1426
Qu Wenruofefdc552015-10-12 15:35:38 +08001427int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001428 u32 bits, int wake, int delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001429 struct extent_state **cached)
Qu Wenruofefdc552015-10-12 15:35:38 +08001430{
1431 return __clear_extent_bit(tree, start, end, bits, wake, delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001432 cached, GFP_NOFS, NULL);
Qu Wenruofefdc552015-10-12 15:35:38 +08001433}
1434
Qu Wenruofefdc552015-10-12 15:35:38 +08001435int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001436 u32 bits, struct extent_changeset *changeset)
Qu Wenruofefdc552015-10-12 15:35:38 +08001437{
1438 /*
1439 * Don't support EXTENT_LOCKED case, same reason as
1440 * set_record_extent_bits().
1441 */
1442 BUG_ON(bits & EXTENT_LOCKED);
1443
David Sterbaf734c442016-04-26 23:54:39 +02001444 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
Qu Wenruofefdc552015-10-12 15:35:38 +08001445 changeset);
1446}
1447
Chris Masond352ac62008-09-29 15:18:18 -04001448/*
1449 * either insert or lock state struct between start and end use mask to tell
1450 * us if waiting is desired.
1451 */
Chris Mason1edbb732009-09-02 13:24:36 -04001452int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaff13db42015-12-03 14:30:40 +01001453 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001454{
1455 int err;
1456 u64 failed_start;
David Sterba9ee49a042015-01-14 19:52:13 +01001457
Chris Masond1310b22008-01-24 16:13:08 -05001458 while (1) {
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001459 err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
1460 EXTENT_LOCKED, &failed_start,
1461 cached_state, GFP_NOFS, NULL);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001462 if (err == -EEXIST) {
Chris Masond1310b22008-01-24 16:13:08 -05001463 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1464 start = failed_start;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001465 } else
Chris Masond1310b22008-01-24 16:13:08 -05001466 break;
Chris Masond1310b22008-01-24 16:13:08 -05001467 WARN_ON(start > end);
1468 }
1469 return err;
1470}
Chris Masond1310b22008-01-24 16:13:08 -05001471
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001472int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001473{
1474 int err;
1475 u64 failed_start;
1476
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001477 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1478 &failed_start, NULL, GFP_NOFS, NULL);
Yan Zheng66435582008-10-30 14:19:50 -04001479 if (err == -EEXIST) {
1480 if (failed_start > start)
1481 clear_extent_bit(tree, start, failed_start - 1,
David Sterbaae0f1622017-10-31 16:37:52 +01001482 EXTENT_LOCKED, 1, 0, NULL);
Josef Bacik25179202008-10-29 14:49:05 -04001483 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001484 }
Josef Bacik25179202008-10-29 14:49:05 -04001485 return 1;
1486}
Josef Bacik25179202008-10-29 14:49:05 -04001487
David Sterbabd1fa4f2015-12-03 13:08:59 +01001488void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001489{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001490 unsigned long index = start >> PAGE_SHIFT;
1491 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001492 struct page *page;
1493
1494 while (index <= end_index) {
1495 page = find_get_page(inode->i_mapping, index);
1496 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1497 clear_page_dirty_for_io(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001498 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001499 index++;
1500 }
Chris Mason4adaa612013-03-26 13:07:00 -04001501}
1502
David Sterbaf6311572015-12-03 13:08:59 +01001503void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001504{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001505 unsigned long index = start >> PAGE_SHIFT;
1506 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001507 struct page *page;
1508
1509 while (index <= end_index) {
1510 page = find_get_page(inode->i_mapping, index);
1511 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Mason4adaa612013-03-26 13:07:00 -04001512 __set_page_dirty_nobuffers(page);
Konstantin Khebnikov8d386332015-02-11 15:26:55 -08001513 account_page_redirty(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001514 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001515 index++;
1516 }
Chris Mason4adaa612013-03-26 13:07:00 -04001517}
1518
Chris Masond352ac62008-09-29 15:18:18 -04001519/* find the first state struct with 'bits' set after 'start', and
1520 * return it. tree->lock must be held. NULL will returned if
1521 * nothing was found after 'start'
1522 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00001523static struct extent_state *
Qu Wenruof97e27e2020-11-13 20:51:40 +08001524find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
Chris Masond7fc6402008-02-18 12:12:38 -05001525{
1526 struct rb_node *node;
1527 struct extent_state *state;
1528
1529 /*
1530 * this search will find all the extents that end after
1531 * our range starts.
1532 */
1533 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001534 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001535 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001536
Chris Masond3977122009-01-05 21:25:51 -05001537 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001538 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001539 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001540 return state;
Chris Masond3977122009-01-05 21:25:51 -05001541
Chris Masond7fc6402008-02-18 12:12:38 -05001542 node = rb_next(node);
1543 if (!node)
1544 break;
1545 }
1546out:
1547 return NULL;
1548}
Chris Masond7fc6402008-02-18 12:12:38 -05001549
Chris Masond352ac62008-09-29 15:18:18 -04001550/*
Qu Wenruo03509b72020-10-21 14:24:50 +08001551 * Find the first offset in the io tree with one or more @bits set.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001552 *
Qu Wenruo03509b72020-10-21 14:24:50 +08001553 * Note: If there are multiple bits set in @bits, any of them will match.
1554 *
1555 * Return 0 if we find something, and update @start_ret and @end_ret.
1556 * Return 1 if we found nothing.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001557 */
1558int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001559 u64 *start_ret, u64 *end_ret, u32 bits,
Josef Bacike6138872012-09-27 17:07:30 -04001560 struct extent_state **cached_state)
Xiao Guangrong69261c42011-07-14 03:19:45 +00001561{
1562 struct extent_state *state;
1563 int ret = 1;
1564
1565 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001566 if (cached_state && *cached_state) {
1567 state = *cached_state;
Filipe Manana27a35072014-07-06 20:09:59 +01001568 if (state->end == start - 1 && extent_state_in_tree(state)) {
Liu Bo9688e9a2018-08-23 03:14:53 +08001569 while ((state = next_state(state)) != NULL) {
Josef Bacike6138872012-09-27 17:07:30 -04001570 if (state->state & bits)
1571 goto got_it;
Josef Bacike6138872012-09-27 17:07:30 -04001572 }
1573 free_extent_state(*cached_state);
1574 *cached_state = NULL;
1575 goto out;
1576 }
1577 free_extent_state(*cached_state);
1578 *cached_state = NULL;
1579 }
1580
Xiao Guangrong69261c42011-07-14 03:19:45 +00001581 state = find_first_extent_bit_state(tree, start, bits);
Josef Bacike6138872012-09-27 17:07:30 -04001582got_it:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001583 if (state) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +01001584 cache_state_if_flags(state, cached_state, 0);
Xiao Guangrong69261c42011-07-14 03:19:45 +00001585 *start_ret = state->start;
1586 *end_ret = state->end;
1587 ret = 0;
1588 }
Josef Bacike6138872012-09-27 17:07:30 -04001589out:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001590 spin_unlock(&tree->lock);
1591 return ret;
1592}
1593
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001594/**
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02001595 * Find a contiguous area of bits
1596 *
1597 * @tree: io tree to check
1598 * @start: offset to start the search from
1599 * @start_ret: the first offset we found with the bits set
1600 * @end_ret: the final contiguous range of the bits that were set
1601 * @bits: bits to look for
Josef Bacik41a2ee72020-01-17 09:02:21 -05001602 *
1603 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1604 * to set bits appropriately, and then merge them again. During this time it
1605 * will drop the tree->lock, so use this helper if you want to find the actual
1606 * contiguous area for given bits. We will search to the first bit we find, and
1607 * then walk down the tree until we find a non-contiguous area. The area
1608 * returned will be the full contiguous area with the bits set.
1609 */
1610int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001611 u64 *start_ret, u64 *end_ret, u32 bits)
Josef Bacik41a2ee72020-01-17 09:02:21 -05001612{
1613 struct extent_state *state;
1614 int ret = 1;
1615
1616 spin_lock(&tree->lock);
1617 state = find_first_extent_bit_state(tree, start, bits);
1618 if (state) {
1619 *start_ret = state->start;
1620 *end_ret = state->end;
1621 while ((state = next_state(state)) != NULL) {
1622 if (state->start > (*end_ret + 1))
1623 break;
1624 *end_ret = state->end;
1625 }
1626 ret = 0;
1627 }
1628 spin_unlock(&tree->lock);
1629 return ret;
1630}
1631
1632/**
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02001633 * Find the first range that has @bits not set. This range could start before
1634 * @start.
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001635 *
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02001636 * @tree: the tree to search
1637 * @start: offset at/after which the found extent should start
1638 * @start_ret: records the beginning of the range
1639 * @end_ret: records the end of the range (inclusive)
1640 * @bits: the set of bits which must be unset
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001641 *
1642 * Since unallocated range is also considered one which doesn't have the bits
1643 * set it's possible that @end_ret contains -1, this happens in case the range
1644 * spans (last_range_end, end of device]. In this case it's up to the caller to
1645 * trim @end_ret to the appropriate size.
1646 */
1647void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001648 u64 *start_ret, u64 *end_ret, u32 bits)
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001649{
1650 struct extent_state *state;
1651 struct rb_node *node, *prev = NULL, *next;
1652
1653 spin_lock(&tree->lock);
1654
1655 /* Find first extent with bits cleared */
1656 while (1) {
1657 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
Nikolay Borisov5750c372020-01-27 11:59:26 +02001658 if (!node && !next && !prev) {
1659 /*
1660 * Tree is completely empty, send full range and let
1661 * caller deal with it
1662 */
1663 *start_ret = 0;
1664 *end_ret = -1;
1665 goto out;
1666 } else if (!node && !next) {
1667 /*
1668 * We are past the last allocated chunk, set start at
1669 * the end of the last extent.
1670 */
1671 state = rb_entry(prev, struct extent_state, rb_node);
1672 *start_ret = state->end + 1;
1673 *end_ret = -1;
1674 goto out;
1675 } else if (!node) {
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001676 node = next;
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001677 }
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001678 /*
1679 * At this point 'node' either contains 'start' or start is
1680 * before 'node'
1681 */
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001682 state = rb_entry(node, struct extent_state, rb_node);
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001683
1684 if (in_range(start, state->start, state->end - state->start + 1)) {
1685 if (state->state & bits) {
1686 /*
1687 * |--range with bits sets--|
1688 * |
1689 * start
1690 */
1691 start = state->end + 1;
1692 } else {
1693 /*
1694 * 'start' falls within a range that doesn't
1695 * have the bits set, so take its start as
1696 * the beginning of the desired range
1697 *
1698 * |--range with bits cleared----|
1699 * |
1700 * start
1701 */
1702 *start_ret = state->start;
1703 break;
1704 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001705 } else {
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001706 /*
1707 * |---prev range---|---hole/unset---|---node range---|
1708 * |
1709 * start
1710 *
1711 * or
1712 *
1713 * |---hole/unset--||--first node--|
1714 * 0 |
1715 * start
1716 */
1717 if (prev) {
1718 state = rb_entry(prev, struct extent_state,
1719 rb_node);
1720 *start_ret = state->end + 1;
1721 } else {
1722 *start_ret = 0;
1723 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001724 break;
1725 }
1726 }
1727
1728 /*
1729 * Find the longest stretch from start until an entry which has the
1730 * bits set
1731 */
1732 while (1) {
1733 state = rb_entry(node, struct extent_state, rb_node);
1734 if (state->end >= start && !(state->state & bits)) {
1735 *end_ret = state->end;
1736 } else {
1737 *end_ret = state->start - 1;
1738 break;
1739 }
1740
1741 node = rb_next(node);
1742 if (!node)
1743 break;
1744 }
1745out:
1746 spin_unlock(&tree->lock);
1747}
1748
Xiao Guangrong69261c42011-07-14 03:19:45 +00001749/*
Chris Masond352ac62008-09-29 15:18:18 -04001750 * find a contiguous range of bytes in the file marked as delalloc, not
1751 * more than 'max_bytes'. start and end are used to return the range,
1752 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001753 * true is returned if we find something, false if nothing was in the tree
Chris Masond352ac62008-09-29 15:18:18 -04001754 */
Josef Bacik083e75e2019-09-23 10:05:20 -04001755bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1756 u64 *end, u64 max_bytes,
1757 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001758{
1759 struct rb_node *node;
1760 struct extent_state *state;
1761 u64 cur_start = *start;
Lu Fengqi3522e902018-11-29 11:33:38 +08001762 bool found = false;
Chris Masond1310b22008-01-24 16:13:08 -05001763 u64 total_bytes = 0;
1764
Chris Masoncad321a2008-12-17 14:51:42 -05001765 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001766
Chris Masond1310b22008-01-24 16:13:08 -05001767 /*
1768 * this search will find all the extents that end after
1769 * our range starts.
1770 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001771 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001772 if (!node) {
Lu Fengqi3522e902018-11-29 11:33:38 +08001773 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001774 goto out;
1775 }
1776
Chris Masond3977122009-01-05 21:25:51 -05001777 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001778 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001779 if (found && (state->start != cur_start ||
1780 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001781 goto out;
1782 }
1783 if (!(state->state & EXTENT_DELALLOC)) {
1784 if (!found)
1785 *end = state->end;
1786 goto out;
1787 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001788 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001789 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001790 *cached_state = state;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +02001791 refcount_inc(&state->refs);
Josef Bacikc2a128d2010-02-02 21:19:11 +00001792 }
Lu Fengqi3522e902018-11-29 11:33:38 +08001793 found = true;
Chris Masond1310b22008-01-24 16:13:08 -05001794 *end = state->end;
1795 cur_start = state->end + 1;
1796 node = rb_next(node);
Chris Masond1310b22008-01-24 16:13:08 -05001797 total_bytes += state->end - state->start + 1;
Josef Bacik7bf811a52013-10-07 22:11:09 -04001798 if (total_bytes >= max_bytes)
Josef Bacik573aeca2013-08-30 14:38:49 -04001799 break;
Josef Bacik573aeca2013-08-30 14:38:49 -04001800 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001801 break;
1802 }
1803out:
Chris Masoncad321a2008-12-17 14:51:42 -05001804 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001805 return found;
1806}
1807
Liu Boda2c7002017-02-10 16:41:05 +01001808static int __process_pages_contig(struct address_space *mapping,
1809 struct page *locked_page,
1810 pgoff_t start_index, pgoff_t end_index,
1811 unsigned long page_ops, pgoff_t *index_ret);
1812
Jeff Mahoney143bede2012-03-01 14:56:26 +01001813static noinline void __unlock_for_delalloc(struct inode *inode,
1814 struct page *locked_page,
1815 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001816{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001817 unsigned long index = start >> PAGE_SHIFT;
1818 unsigned long end_index = end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001819
Liu Bo76c00212017-02-10 16:42:14 +01001820 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001821 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001822 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001823
Liu Bo76c00212017-02-10 16:42:14 +01001824 __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1825 PAGE_UNLOCK, NULL);
Chris Masonc8b97812008-10-29 14:49:59 -04001826}
1827
1828static noinline int lock_delalloc_pages(struct inode *inode,
1829 struct page *locked_page,
1830 u64 delalloc_start,
1831 u64 delalloc_end)
1832{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001833 unsigned long index = delalloc_start >> PAGE_SHIFT;
Liu Bo76c00212017-02-10 16:42:14 +01001834 unsigned long index_ret = index;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001835 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001836 int ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001837
Liu Bo76c00212017-02-10 16:42:14 +01001838 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001839 if (index == locked_page->index && index == end_index)
1840 return 0;
1841
Liu Bo76c00212017-02-10 16:42:14 +01001842 ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1843 end_index, PAGE_LOCK, &index_ret);
1844 if (ret == -EAGAIN)
1845 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1846 (u64)index_ret << PAGE_SHIFT);
Chris Masonc8b97812008-10-29 14:49:59 -04001847 return ret;
1848}
1849
1850/*
Lu Fengqi3522e902018-11-29 11:33:38 +08001851 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1852 * more than @max_bytes. @Start and @end are used to return the range,
Chris Masonc8b97812008-10-29 14:49:59 -04001853 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001854 * Return: true if we find something
1855 * false if nothing was in the tree
Chris Masonc8b97812008-10-29 14:49:59 -04001856 */
Johannes Thumshirnce9f9672018-11-19 10:38:17 +01001857EXPORT_FOR_TESTS
Lu Fengqi3522e902018-11-29 11:33:38 +08001858noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
Josef Bacik294e30f2013-10-09 12:00:56 -04001859 struct page *locked_page, u64 *start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03001860 u64 *end)
Chris Masonc8b97812008-10-29 14:49:59 -04001861{
Goldwyn Rodrigues99780592019-06-21 10:02:54 -05001862 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Nikolay Borisov917aace2018-10-26 14:43:20 +03001863 u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001864 u64 delalloc_start;
1865 u64 delalloc_end;
Lu Fengqi3522e902018-11-29 11:33:38 +08001866 bool found;
Chris Mason9655d292009-09-02 15:22:30 -04001867 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001868 int ret;
1869 int loops = 0;
1870
1871again:
1872 /* step one, find a bunch of delalloc bytes starting at start */
1873 delalloc_start = *start;
1874 delalloc_end = 0;
Josef Bacik083e75e2019-09-23 10:05:20 -04001875 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1876 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001877 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001878 *start = delalloc_start;
1879 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001880 free_extent_state(cached_state);
Lu Fengqi3522e902018-11-29 11:33:38 +08001881 return false;
Chris Masonc8b97812008-10-29 14:49:59 -04001882 }
1883
1884 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001885 * start comes from the offset of locked_page. We have to lock
1886 * pages in order, so we can't process delalloc bytes before
1887 * locked_page
1888 */
Chris Masond3977122009-01-05 21:25:51 -05001889 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001890 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001891
1892 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001893 * make sure to limit the number of pages we try to lock down
Chris Masonc8b97812008-10-29 14:49:59 -04001894 */
Josef Bacik7bf811a52013-10-07 22:11:09 -04001895 if (delalloc_end + 1 - delalloc_start > max_bytes)
1896 delalloc_end = delalloc_start + max_bytes - 1;
Chris Masond3977122009-01-05 21:25:51 -05001897
Chris Masonc8b97812008-10-29 14:49:59 -04001898 /* step two, lock all the pages after the page that has start */
1899 ret = lock_delalloc_pages(inode, locked_page,
1900 delalloc_start, delalloc_end);
Nikolay Borisov9bfd61d2018-10-26 14:43:21 +03001901 ASSERT(!ret || ret == -EAGAIN);
Chris Masonc8b97812008-10-29 14:49:59 -04001902 if (ret == -EAGAIN) {
1903 /* some of the pages are gone, lets avoid looping by
1904 * shortening the size of the delalloc range we're searching
1905 */
Chris Mason9655d292009-09-02 15:22:30 -04001906 free_extent_state(cached_state);
Chris Mason7d788742014-05-21 05:49:54 -07001907 cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001908 if (!loops) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001909 max_bytes = PAGE_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001910 loops = 1;
1911 goto again;
1912 } else {
Lu Fengqi3522e902018-11-29 11:33:38 +08001913 found = false;
Chris Masonc8b97812008-10-29 14:49:59 -04001914 goto out_failed;
1915 }
1916 }
Chris Masonc8b97812008-10-29 14:49:59 -04001917
1918 /* step three, lock the state bits for the whole range */
David Sterbaff13db42015-12-03 14:30:40 +01001919 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001920
1921 /* then test to make sure it is all still delalloc */
1922 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001923 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001924 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001925 unlock_extent_cached(tree, delalloc_start, delalloc_end,
David Sterbae43bbe52017-12-12 21:43:52 +01001926 &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001927 __unlock_for_delalloc(inode, locked_page,
1928 delalloc_start, delalloc_end);
1929 cond_resched();
1930 goto again;
1931 }
Chris Mason9655d292009-09-02 15:22:30 -04001932 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001933 *start = delalloc_start;
1934 *end = delalloc_end;
1935out_failed:
1936 return found;
1937}
1938
Liu Boda2c7002017-02-10 16:41:05 +01001939static int __process_pages_contig(struct address_space *mapping,
1940 struct page *locked_page,
1941 pgoff_t start_index, pgoff_t end_index,
1942 unsigned long page_ops, pgoff_t *index_ret)
Chris Masonc8b97812008-10-29 14:49:59 -04001943{
Liu Bo873695b2017-02-02 17:49:22 -08001944 unsigned long nr_pages = end_index - start_index + 1;
Qu Wenruo12e33602020-10-21 14:24:57 +08001945 unsigned long pages_processed = 0;
Liu Bo873695b2017-02-02 17:49:22 -08001946 pgoff_t index = start_index;
Chris Masonc8b97812008-10-29 14:49:59 -04001947 struct page *pages[16];
Liu Bo873695b2017-02-02 17:49:22 -08001948 unsigned ret;
Liu Boda2c7002017-02-10 16:41:05 +01001949 int err = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001950 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001951
Liu Boda2c7002017-02-10 16:41:05 +01001952 if (page_ops & PAGE_LOCK) {
1953 ASSERT(page_ops == PAGE_LOCK);
1954 ASSERT(index_ret && *index_ret == start_index);
1955 }
1956
Filipe Manana704de492014-10-06 22:14:22 +01001957 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
Liu Bo873695b2017-02-02 17:49:22 -08001958 mapping_set_error(mapping, -EIO);
Filipe Manana704de492014-10-06 22:14:22 +01001959
Chris Masond3977122009-01-05 21:25:51 -05001960 while (nr_pages > 0) {
Liu Bo873695b2017-02-02 17:49:22 -08001961 ret = find_get_pages_contig(mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001962 min_t(unsigned long,
1963 nr_pages, ARRAY_SIZE(pages)), pages);
Liu Boda2c7002017-02-10 16:41:05 +01001964 if (ret == 0) {
1965 /*
1966 * Only if we're going to lock these pages,
1967 * can we find nothing at @index.
1968 */
1969 ASSERT(page_ops & PAGE_LOCK);
Liu Bo49d4a332017-03-06 18:20:56 -08001970 err = -EAGAIN;
1971 goto out;
Liu Boda2c7002017-02-10 16:41:05 +01001972 }
Chris Mason8b62b722009-09-02 16:53:46 -04001973
Liu Boda2c7002017-02-10 16:41:05 +01001974 for (i = 0; i < ret; i++) {
Josef Bacikc2790a22013-07-29 11:20:47 -04001975 if (page_ops & PAGE_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001976 SetPagePrivate2(pages[i]);
1977
Chris Mason1d53c9e2019-07-10 12:28:16 -07001978 if (locked_page && pages[i] == locked_page) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001979 put_page(pages[i]);
Qu Wenruo12e33602020-10-21 14:24:57 +08001980 pages_processed++;
Chris Masonc8b97812008-10-29 14:49:59 -04001981 continue;
1982 }
Qu Wenruo6869b0a2021-01-26 16:33:45 +08001983 if (page_ops & PAGE_START_WRITEBACK) {
Chris Masonc8b97812008-10-29 14:49:59 -04001984 clear_page_dirty_for_io(pages[i]);
Chris Masonc8b97812008-10-29 14:49:59 -04001985 set_page_writeback(pages[i]);
Qu Wenruo6869b0a2021-01-26 16:33:45 +08001986 }
Filipe Manana704de492014-10-06 22:14:22 +01001987 if (page_ops & PAGE_SET_ERROR)
1988 SetPageError(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001989 if (page_ops & PAGE_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001990 end_page_writeback(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001991 if (page_ops & PAGE_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001992 unlock_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01001993 if (page_ops & PAGE_LOCK) {
1994 lock_page(pages[i]);
1995 if (!PageDirty(pages[i]) ||
1996 pages[i]->mapping != mapping) {
1997 unlock_page(pages[i]);
Robbie Ko5909ca12020-07-20 09:42:09 +08001998 for (; i < ret; i++)
1999 put_page(pages[i]);
Liu Boda2c7002017-02-10 16:41:05 +01002000 err = -EAGAIN;
2001 goto out;
2002 }
2003 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002004 put_page(pages[i]);
Qu Wenruo12e33602020-10-21 14:24:57 +08002005 pages_processed++;
Chris Masonc8b97812008-10-29 14:49:59 -04002006 }
2007 nr_pages -= ret;
2008 index += ret;
2009 cond_resched();
2010 }
Liu Boda2c7002017-02-10 16:41:05 +01002011out:
2012 if (err && index_ret)
Qu Wenruo12e33602020-10-21 14:24:57 +08002013 *index_ret = start_index + pages_processed - 1;
Liu Boda2c7002017-02-10 16:41:05 +01002014 return err;
Chris Masonc8b97812008-10-29 14:49:59 -04002015}
Chris Masonc8b97812008-10-29 14:49:59 -04002016
Nikolay Borisovad7ff172020-06-03 08:55:06 +03002017void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
Nikolay Borisov74e91942019-07-17 16:18:16 +03002018 struct page *locked_page,
Qu Wenruof97e27e2020-11-13 20:51:40 +08002019 u32 clear_bits, unsigned long page_ops)
Liu Bo873695b2017-02-02 17:49:22 -08002020{
Nikolay Borisovad7ff172020-06-03 08:55:06 +03002021 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
Liu Bo873695b2017-02-02 17:49:22 -08002022
Nikolay Borisovad7ff172020-06-03 08:55:06 +03002023 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
Liu Bo873695b2017-02-02 17:49:22 -08002024 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
Liu Boda2c7002017-02-10 16:41:05 +01002025 page_ops, NULL);
Liu Bo873695b2017-02-02 17:49:22 -08002026}
2027
Chris Masond352ac62008-09-29 15:18:18 -04002028/*
2029 * count the number of bytes in the tree that have a given bit(s)
2030 * set. This can be fairly slow, except for EXTENT_DIRTY which is
2031 * cached. The total number found is returned.
2032 */
Chris Masond1310b22008-01-24 16:13:08 -05002033u64 count_range_bits(struct extent_io_tree *tree,
2034 u64 *start, u64 search_end, u64 max_bytes,
Qu Wenruof97e27e2020-11-13 20:51:40 +08002035 u32 bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05002036{
2037 struct rb_node *node;
2038 struct extent_state *state;
2039 u64 cur_start = *start;
2040 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05002041 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002042 int found = 0;
2043
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05302044 if (WARN_ON(search_end <= cur_start))
Chris Masond1310b22008-01-24 16:13:08 -05002045 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05002046
Chris Masoncad321a2008-12-17 14:51:42 -05002047 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002048 if (cur_start == 0 && bits == EXTENT_DIRTY) {
2049 total_bytes = tree->dirty_bytes;
2050 goto out;
2051 }
2052 /*
2053 * this search will find all the extents that end after
2054 * our range starts.
2055 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002056 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05002057 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05002058 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05002059
Chris Masond3977122009-01-05 21:25:51 -05002060 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05002061 state = rb_entry(node, struct extent_state, rb_node);
2062 if (state->start > search_end)
2063 break;
Chris Masonec29ed52011-02-23 16:23:20 -05002064 if (contig && found && state->start > last + 1)
2065 break;
2066 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05002067 total_bytes += min(search_end, state->end) + 1 -
2068 max(cur_start, state->start);
2069 if (total_bytes >= max_bytes)
2070 break;
2071 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04002072 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05002073 found = 1;
2074 }
Chris Masonec29ed52011-02-23 16:23:20 -05002075 last = state->end;
2076 } else if (contig && found) {
2077 break;
Chris Masond1310b22008-01-24 16:13:08 -05002078 }
2079 node = rb_next(node);
2080 if (!node)
2081 break;
2082 }
2083out:
Chris Masoncad321a2008-12-17 14:51:42 -05002084 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002085 return total_bytes;
2086}
Christoph Hellwigb2950862008-12-02 09:54:17 -05002087
Chris Masond352ac62008-09-29 15:18:18 -04002088/*
2089 * set the private field for a given byte offset in the tree. If there isn't
2090 * an extent_state there already, this does nothing.
2091 */
Josef Bacikb3f167a2019-09-23 10:05:21 -04002092int set_state_failrec(struct extent_io_tree *tree, u64 start,
2093 struct io_failure_record *failrec)
Chris Masond1310b22008-01-24 16:13:08 -05002094{
2095 struct rb_node *node;
2096 struct extent_state *state;
2097 int ret = 0;
2098
Chris Masoncad321a2008-12-17 14:51:42 -05002099 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002100 /*
2101 * this search will find all the extents that end after
2102 * our range starts.
2103 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002104 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002105 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05002106 ret = -ENOENT;
2107 goto out;
2108 }
2109 state = rb_entry(node, struct extent_state, rb_node);
2110 if (state->start != start) {
2111 ret = -ENOENT;
2112 goto out;
2113 }
David Sterba47dc1962016-02-11 13:24:13 +01002114 state->failrec = failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002115out:
Chris Masoncad321a2008-12-17 14:51:42 -05002116 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002117 return ret;
2118}
2119
Nikolay Borisov2279a272020-07-02 15:23:28 +03002120struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
Chris Masond1310b22008-01-24 16:13:08 -05002121{
2122 struct rb_node *node;
2123 struct extent_state *state;
Nikolay Borisov2279a272020-07-02 15:23:28 +03002124 struct io_failure_record *failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002125
Chris Masoncad321a2008-12-17 14:51:42 -05002126 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002127 /*
2128 * this search will find all the extents that end after
2129 * our range starts.
2130 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002131 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002132 if (!node) {
Nikolay Borisov2279a272020-07-02 15:23:28 +03002133 failrec = ERR_PTR(-ENOENT);
Chris Masond1310b22008-01-24 16:13:08 -05002134 goto out;
2135 }
2136 state = rb_entry(node, struct extent_state, rb_node);
2137 if (state->start != start) {
Nikolay Borisov2279a272020-07-02 15:23:28 +03002138 failrec = ERR_PTR(-ENOENT);
Chris Masond1310b22008-01-24 16:13:08 -05002139 goto out;
2140 }
Nikolay Borisov2279a272020-07-02 15:23:28 +03002141
2142 failrec = state->failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002143out:
Chris Masoncad321a2008-12-17 14:51:42 -05002144 spin_unlock(&tree->lock);
Nikolay Borisov2279a272020-07-02 15:23:28 +03002145 return failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002146}
2147
2148/*
2149 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05002150 * If 'filled' == 1, this returns 1 only if every extent in the tree
Chris Masond1310b22008-01-24 16:13:08 -05002151 * has the bits set. Otherwise, 1 is returned if any bit in the
2152 * range is found set.
2153 */
2154int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08002155 u32 bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05002156{
2157 struct extent_state *state = NULL;
2158 struct rb_node *node;
2159 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002160
Chris Masoncad321a2008-12-17 14:51:42 -05002161 spin_lock(&tree->lock);
Filipe Manana27a35072014-07-06 20:09:59 +01002162 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
Josef Bacikdf98b6e2011-06-20 14:53:48 -04002163 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04002164 node = &cached->rb_node;
2165 else
2166 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05002167 while (node && start <= end) {
2168 state = rb_entry(node, struct extent_state, rb_node);
2169
2170 if (filled && state->start > start) {
2171 bitset = 0;
2172 break;
2173 }
2174
2175 if (state->start > end)
2176 break;
2177
2178 if (state->state & bits) {
2179 bitset = 1;
2180 if (!filled)
2181 break;
2182 } else if (filled) {
2183 bitset = 0;
2184 break;
2185 }
Chris Mason46562ce2009-09-23 20:23:16 -04002186
2187 if (state->end == (u64)-1)
2188 break;
2189
Chris Masond1310b22008-01-24 16:13:08 -05002190 start = state->end + 1;
2191 if (start > end)
2192 break;
2193 node = rb_next(node);
2194 if (!node) {
2195 if (filled)
2196 bitset = 0;
2197 break;
2198 }
2199 }
Chris Masoncad321a2008-12-17 14:51:42 -05002200 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002201 return bitset;
2202}
Chris Masond1310b22008-01-24 16:13:08 -05002203
2204/*
2205 * helper function to set a given page up to date if all the
2206 * extents in the tree for that page are up to date
2207 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01002208static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05002209{
Miao Xie4eee4fa2012-12-21 09:17:45 +00002210 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002211 u64 end = start + PAGE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04002212 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05002213 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002214}
2215
Josef Bacik7870d082017-05-05 11:57:15 -04002216int free_io_failure(struct extent_io_tree *failure_tree,
2217 struct extent_io_tree *io_tree,
2218 struct io_failure_record *rec)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002219{
2220 int ret;
2221 int err = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002222
David Sterba47dc1962016-02-11 13:24:13 +01002223 set_state_failrec(failure_tree, rec->start, NULL);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002224 ret = clear_extent_bits(failure_tree, rec->start,
2225 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002226 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002227 if (ret)
2228 err = ret;
2229
Josef Bacik7870d082017-05-05 11:57:15 -04002230 ret = clear_extent_bits(io_tree, rec->start,
David Woodhouse53b381b2013-01-29 18:40:14 -05002231 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002232 EXTENT_DAMAGED);
David Woodhouse53b381b2013-01-29 18:40:14 -05002233 if (ret && !err)
2234 err = ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002235
2236 kfree(rec);
2237 return err;
2238}
2239
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002240/*
2241 * this bypasses the standard btrfs submit functions deliberately, as
2242 * the standard behavior is to write all copies in a raid setup. here we only
2243 * want to write the one bad copy. so we do the mapping for ourselves and issue
2244 * submit_bio directly.
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002245 * to avoid any synchronization issues, wait for the data after writing, which
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002246 * actually prevents the read that triggered the error from finishing.
2247 * currently, there can be no more than two copies of every data bit. thus,
2248 * exactly one rewrite is required.
2249 */
Josef Bacik6ec656b2017-05-05 11:57:14 -04002250int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2251 u64 length, u64 logical, struct page *page,
2252 unsigned int pg_offset, int mirror_num)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002253{
2254 struct bio *bio;
2255 struct btrfs_device *dev;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002256 u64 map_length = 0;
2257 u64 sector;
2258 struct btrfs_bio *bbio = NULL;
2259 int ret;
2260
Linus Torvalds1751e8a2017-11-27 13:05:09 -08002261 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002262 BUG_ON(!mirror_num);
2263
Naohiro Aotaf7ef5282021-02-04 19:22:16 +09002264 if (btrfs_is_zoned(fs_info))
2265 return btrfs_repair_one_zone(fs_info, logical);
2266
David Sterbac5e4c3d2017-06-12 17:29:41 +02002267 bio = btrfs_io_bio_alloc(1);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002268 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002269 map_length = length;
2270
Filipe Mananab5de8d02016-05-27 22:21:27 +01002271 /*
2272 * Avoid races with device replace and make sure our bbio has devices
2273 * associated to its stripes that don't go away while we are doing the
2274 * read repair operation.
2275 */
2276 btrfs_bio_counter_inc_blocked(fs_info);
Nikolay Borisove4ff5fb2017-07-19 10:48:42 +03002277 if (btrfs_is_parity_mirror(fs_info, logical, length)) {
Liu Boc7253282017-03-29 10:53:58 -07002278 /*
2279 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2280 * to update all raid stripes, but here we just want to correct
2281 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2282 * stripe's dev and sector.
2283 */
2284 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2285 &map_length, &bbio, 0);
2286 if (ret) {
2287 btrfs_bio_counter_dec(fs_info);
2288 bio_put(bio);
2289 return -EIO;
2290 }
2291 ASSERT(bbio->mirror_num == 1);
2292 } else {
2293 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2294 &map_length, &bbio, mirror_num);
2295 if (ret) {
2296 btrfs_bio_counter_dec(fs_info);
2297 bio_put(bio);
2298 return -EIO;
2299 }
2300 BUG_ON(mirror_num != bbio->mirror_num);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002301 }
Liu Boc7253282017-03-29 10:53:58 -07002302
2303 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002304 bio->bi_iter.bi_sector = sector;
Liu Boc7253282017-03-29 10:53:58 -07002305 dev = bbio->stripes[bbio->mirror_num - 1].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08002306 btrfs_put_bbio(bbio);
Anand Jainebbede42017-12-04 12:54:52 +08002307 if (!dev || !dev->bdev ||
2308 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
Filipe Mananab5de8d02016-05-27 22:21:27 +01002309 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002310 bio_put(bio);
2311 return -EIO;
2312 }
Christoph Hellwig74d46992017-08-23 19:10:32 +02002313 bio_set_dev(bio, dev->bdev);
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002314 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
Miao Xieffdd2012014-09-12 18:44:00 +08002315 bio_add_page(bio, page, length, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002316
Mike Christie4e49ea42016-06-05 14:31:41 -05002317 if (btrfsic_submit_bio_wait(bio)) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002318 /* try to remap that extent elsewhere? */
Filipe Mananab5de8d02016-05-27 22:21:27 +01002319 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002320 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002321 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002322 return -EIO;
2323 }
2324
David Sterbab14af3b2015-10-08 10:43:10 +02002325 btrfs_info_rl_in_rcu(fs_info,
2326 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
Josef Bacik6ec656b2017-05-05 11:57:14 -04002327 ino, start,
Miao Xie1203b682014-09-12 18:44:01 +08002328 rcu_str_deref(dev->name), sector);
Filipe Mananab5de8d02016-05-27 22:21:27 +01002329 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002330 bio_put(bio);
2331 return 0;
2332}
2333
David Sterba2b489662020-04-29 03:04:10 +02002334int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
Josef Bacikea466792012-03-26 21:57:36 -04002335{
David Sterba20a1fbf92019-03-20 11:23:44 +01002336 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacikea466792012-03-26 21:57:36 -04002337 u64 start = eb->start;
David Sterbacc5e31a2018-03-01 18:20:27 +01002338 int i, num_pages = num_extent_pages(eb);
Chris Masond95603b2012-04-12 15:55:15 -04002339 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04002340
David Howellsbc98a422017-07-17 08:45:34 +01002341 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002342 return -EROFS;
2343
Josef Bacikea466792012-03-26 21:57:36 -04002344 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02002345 struct page *p = eb->pages[i];
Miao Xie1203b682014-09-12 18:44:01 +08002346
Josef Bacik6ec656b2017-05-05 11:57:14 -04002347 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
Miao Xie1203b682014-09-12 18:44:01 +08002348 start - page_offset(p), mirror_num);
Josef Bacikea466792012-03-26 21:57:36 -04002349 if (ret)
2350 break;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002351 start += PAGE_SIZE;
Josef Bacikea466792012-03-26 21:57:36 -04002352 }
2353
2354 return ret;
2355}
2356
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002357/*
2358 * each time an IO finishes, we do a fast check in the IO failure tree
2359 * to see if we need to process or clean up an io_failure_record
2360 */
Josef Bacik7870d082017-05-05 11:57:15 -04002361int clean_io_failure(struct btrfs_fs_info *fs_info,
2362 struct extent_io_tree *failure_tree,
2363 struct extent_io_tree *io_tree, u64 start,
2364 struct page *page, u64 ino, unsigned int pg_offset)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002365{
2366 u64 private;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002367 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002368 struct extent_state *state;
2369 int num_copies;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002370 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002371
2372 private = 0;
Josef Bacik7870d082017-05-05 11:57:15 -04002373 ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2374 EXTENT_DIRTY, 0);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002375 if (!ret)
2376 return 0;
2377
Nikolay Borisov2279a272020-07-02 15:23:28 +03002378 failrec = get_state_failrec(failure_tree, start);
2379 if (IS_ERR(failrec))
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002380 return 0;
2381
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002382 BUG_ON(!failrec->this_mirror);
2383
2384 if (failrec->in_validation) {
2385 /* there was no real error, just free the record */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002386 btrfs_debug(fs_info,
2387 "clean_io_failure: freeing dummy error at %llu",
2388 failrec->start);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002389 goto out;
2390 }
David Howellsbc98a422017-07-17 08:45:34 +01002391 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002392 goto out;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002393
Josef Bacik7870d082017-05-05 11:57:15 -04002394 spin_lock(&io_tree->lock);
2395 state = find_first_extent_bit_state(io_tree,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002396 failrec->start,
2397 EXTENT_LOCKED);
Josef Bacik7870d082017-05-05 11:57:15 -04002398 spin_unlock(&io_tree->lock);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002399
Miao Xie883d0de2013-07-25 19:22:35 +08002400 if (state && state->start <= failrec->start &&
2401 state->end >= failrec->start + failrec->len - 1) {
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002402 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2403 failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002404 if (num_copies > 1) {
Josef Bacik7870d082017-05-05 11:57:15 -04002405 repair_io_failure(fs_info, ino, start, failrec->len,
2406 failrec->logical, page, pg_offset,
2407 failrec->failed_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002408 }
2409 }
2410
2411out:
Josef Bacik7870d082017-05-05 11:57:15 -04002412 free_io_failure(failure_tree, io_tree, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002413
Miao Xie454ff3d2014-09-12 18:43:58 +08002414 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002415}
2416
Miao Xief6124962014-09-12 18:44:04 +08002417/*
2418 * Can be called when
2419 * - hold extent lock
2420 * - under ordered extent
2421 * - the inode is freeing
2422 */
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002423void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
Miao Xief6124962014-09-12 18:44:04 +08002424{
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002425 struct extent_io_tree *failure_tree = &inode->io_failure_tree;
Miao Xief6124962014-09-12 18:44:04 +08002426 struct io_failure_record *failrec;
2427 struct extent_state *state, *next;
2428
2429 if (RB_EMPTY_ROOT(&failure_tree->state))
2430 return;
2431
2432 spin_lock(&failure_tree->lock);
2433 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2434 while (state) {
2435 if (state->start > end)
2436 break;
2437
2438 ASSERT(state->end <= end);
2439
2440 next = next_state(state);
2441
David Sterba47dc1962016-02-11 13:24:13 +01002442 failrec = state->failrec;
Miao Xief6124962014-09-12 18:44:04 +08002443 free_extent_state(state);
2444 kfree(failrec);
2445
2446 state = next;
2447 }
2448 spin_unlock(&failure_tree->lock);
2449}
2450
Nikolay Borisov35263022020-07-02 15:23:29 +03002451static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2452 u64 start, u64 end)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002453{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002454 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002455 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002456 struct extent_map *em;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002457 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2458 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2459 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002460 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002461 u64 logical;
2462
Nikolay Borisov2279a272020-07-02 15:23:28 +03002463 failrec = get_state_failrec(failure_tree, start);
Nikolay Borisov35263022020-07-02 15:23:29 +03002464 if (!IS_ERR(failrec)) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002465 btrfs_debug(fs_info,
2466 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2467 failrec->logical, failrec->start, failrec->len,
2468 failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002469 /*
2470 * when data can be on disk more than twice, add to failrec here
2471 * (e.g. with a list for failed_mirror) to make
2472 * clean_io_failure() clean all those errors at once.
2473 */
Nikolay Borisov35263022020-07-02 15:23:29 +03002474
2475 return failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002476 }
Miao Xie2fe63032014-09-12 18:43:59 +08002477
Nikolay Borisov35263022020-07-02 15:23:29 +03002478 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2479 if (!failrec)
2480 return ERR_PTR(-ENOMEM);
Miao Xie2fe63032014-09-12 18:43:59 +08002481
Nikolay Borisov35263022020-07-02 15:23:29 +03002482 failrec->start = start;
2483 failrec->len = end - start + 1;
2484 failrec->this_mirror = 0;
2485 failrec->bio_flags = 0;
2486 failrec->in_validation = 0;
2487
2488 read_lock(&em_tree->lock);
2489 em = lookup_extent_mapping(em_tree, start, failrec->len);
2490 if (!em) {
2491 read_unlock(&em_tree->lock);
2492 kfree(failrec);
2493 return ERR_PTR(-EIO);
2494 }
2495
2496 if (em->start > start || em->start + em->len <= start) {
2497 free_extent_map(em);
2498 em = NULL;
2499 }
2500 read_unlock(&em_tree->lock);
2501 if (!em) {
2502 kfree(failrec);
2503 return ERR_PTR(-EIO);
2504 }
2505
2506 logical = start - em->start;
2507 logical = em->block_start + logical;
2508 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2509 logical = em->block_start;
2510 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2511 extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2512 }
2513
2514 btrfs_debug(fs_info,
2515 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2516 logical, start, failrec->len);
2517
2518 failrec->logical = logical;
2519 free_extent_map(em);
2520
2521 /* Set the bits in the private failure tree */
2522 ret = set_extent_bits(failure_tree, start, end,
2523 EXTENT_LOCKED | EXTENT_DIRTY);
2524 if (ret >= 0) {
2525 ret = set_state_failrec(failure_tree, start, failrec);
2526 /* Set the bits in the inode's tree */
2527 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2528 } else if (ret < 0) {
2529 kfree(failrec);
2530 return ERR_PTR(ret);
2531 }
2532
2533 return failrec;
Miao Xie2fe63032014-09-12 18:43:59 +08002534}
2535
Omar Sandovalce06d3e2020-04-16 14:46:18 -07002536static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
2537 struct io_failure_record *failrec,
2538 int failed_mirror)
Miao Xie2fe63032014-09-12 18:43:59 +08002539{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002540 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002541 int num_copies;
2542
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002543 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002544 if (num_copies == 1) {
2545 /*
2546 * we only have a single copy of the data, so don't bother with
2547 * all the retry and error correction code that follows. no
2548 * matter what the error is, it is very likely to persist.
2549 */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002550 btrfs_debug(fs_info,
2551 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2552 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002553 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002554 }
2555
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002556 /*
2557 * there are two premises:
2558 * a) deliver good data to the caller
2559 * b) correct the bad sectors on disk
2560 */
Omar Sandovalc7333972020-04-16 14:46:14 -07002561 if (needs_validation) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002562 /*
2563 * to fulfill b), we need to know the exact failing sectors, as
2564 * we don't want to rewrite any more than the failed ones. thus,
2565 * we need separate read requests for the failed bio
2566 *
2567 * if the following BUG_ON triggers, our validation request got
2568 * merged. we need separate requests for our algorithm to work.
2569 */
2570 BUG_ON(failrec->in_validation);
2571 failrec->in_validation = 1;
2572 failrec->this_mirror = failed_mirror;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002573 } else {
2574 /*
2575 * we're ready to fulfill a) and b) alongside. get a good copy
2576 * of the failed sector and if we succeed, we have setup
2577 * everything for repair_io_failure to do the rest for us.
2578 */
2579 if (failrec->in_validation) {
2580 BUG_ON(failrec->this_mirror != failed_mirror);
2581 failrec->in_validation = 0;
2582 failrec->this_mirror = 0;
2583 }
2584 failrec->failed_mirror = failed_mirror;
2585 failrec->this_mirror++;
2586 if (failrec->this_mirror == failed_mirror)
2587 failrec->this_mirror++;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002588 }
2589
Miao Xiefacc8a222013-07-25 19:22:34 +08002590 if (failrec->this_mirror > num_copies) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002591 btrfs_debug(fs_info,
2592 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2593 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002594 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002595 }
2596
Liu Boc3cfb652017-07-13 15:00:50 -07002597 return true;
Miao Xie2fe63032014-09-12 18:43:59 +08002598}
2599
Omar Sandovalc7333972020-04-16 14:46:14 -07002600static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
Miao Xie2fe63032014-09-12 18:43:59 +08002601{
Omar Sandovalc7333972020-04-16 14:46:14 -07002602 u64 len = 0;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002603 const u32 blocksize = inode->i_sb->s_blocksize;
Miao Xie2fe63032014-09-12 18:43:59 +08002604
Omar Sandovalc7333972020-04-16 14:46:14 -07002605 /*
Omar Sandovalf337bd72020-04-16 14:46:15 -07002606 * If bi_status is BLK_STS_OK, then this was a checksum error, not an
2607 * I/O error. In this case, we already know exactly which sector was
2608 * bad, so we don't need to validate.
2609 */
2610 if (bio->bi_status == BLK_STS_OK)
2611 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002612
Omar Sandovalf337bd72020-04-16 14:46:15 -07002613 /*
Omar Sandovalc7333972020-04-16 14:46:14 -07002614 * We need to validate each sector individually if the failed I/O was
2615 * for multiple sectors.
Omar Sandoval77d5d682020-04-16 14:46:25 -07002616 *
2617 * There are a few possible bios that can end up here:
2618 * 1. A buffered read bio, which is not cloned.
2619 * 2. A direct I/O read bio, which is cloned.
2620 * 3. A (buffered or direct) repair bio, which is not cloned.
2621 *
2622 * For cloned bios (case 2), we can get the size from
2623 * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get
2624 * it from the bvecs.
Omar Sandovalc7333972020-04-16 14:46:14 -07002625 */
Omar Sandoval77d5d682020-04-16 14:46:25 -07002626 if (bio_flagged(bio, BIO_CLONED)) {
2627 if (btrfs_io_bio(bio)->iter.bi_size > blocksize)
Omar Sandovalc7333972020-04-16 14:46:14 -07002628 return true;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002629 } else {
2630 struct bio_vec *bvec;
2631 int i;
Miao Xiefacc8a222013-07-25 19:22:34 +08002632
Omar Sandoval77d5d682020-04-16 14:46:25 -07002633 bio_for_each_bvec_all(bvec, bio, i) {
2634 len += bvec->bv_len;
2635 if (len > blocksize)
2636 return true;
2637 }
Miao Xiefacc8a222013-07-25 19:22:34 +08002638 }
Omar Sandovalc7333972020-04-16 14:46:14 -07002639 return false;
Miao Xie2fe63032014-09-12 18:43:59 +08002640}
2641
Omar Sandoval77d5d682020-04-16 14:46:25 -07002642blk_status_t btrfs_submit_read_repair(struct inode *inode,
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002643 struct bio *failed_bio, u32 bio_offset,
Omar Sandoval77d5d682020-04-16 14:46:25 -07002644 struct page *page, unsigned int pgoff,
2645 u64 start, u64 end, int failed_mirror,
2646 submit_bio_hook_t *submit_bio_hook)
Miao Xie2fe63032014-09-12 18:43:59 +08002647{
2648 struct io_failure_record *failrec;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002649 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002650 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002651 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002652 struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002653 const int icsum = bio_offset >> fs_info->sectorsize_bits;
Omar Sandovalc7333972020-04-16 14:46:14 -07002654 bool need_validation;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002655 struct bio *repair_bio;
2656 struct btrfs_io_bio *repair_io_bio;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002657 blk_status_t status;
Miao Xie2fe63032014-09-12 18:43:59 +08002658
Omar Sandoval77d5d682020-04-16 14:46:25 -07002659 btrfs_debug(fs_info,
2660 "repair read error: read error at %llu", start);
Miao Xie2fe63032014-09-12 18:43:59 +08002661
Mike Christie1f7ad752016-06-05 14:31:51 -05002662 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
Miao Xie2fe63032014-09-12 18:43:59 +08002663
Nikolay Borisov35263022020-07-02 15:23:29 +03002664 failrec = btrfs_get_io_failure_record(inode, start, end);
2665 if (IS_ERR(failrec))
2666 return errno_to_blk_status(PTR_ERR(failrec));
Miao Xie2fe63032014-09-12 18:43:59 +08002667
Omar Sandovalc7333972020-04-16 14:46:14 -07002668 need_validation = btrfs_io_needs_validation(inode, failed_bio);
2669
2670 if (!btrfs_check_repairable(inode, need_validation, failrec,
Liu Boc3cfb652017-07-13 15:00:50 -07002671 failed_mirror)) {
Josef Bacik7870d082017-05-05 11:57:15 -04002672 free_io_failure(failure_tree, tree, failrec);
Omar Sandoval77d5d682020-04-16 14:46:25 -07002673 return BLK_STS_IOERR;
Miao Xie2fe63032014-09-12 18:43:59 +08002674 }
2675
Omar Sandoval77d5d682020-04-16 14:46:25 -07002676 repair_bio = btrfs_io_bio_alloc(1);
2677 repair_io_bio = btrfs_io_bio(repair_bio);
2678 repair_bio->bi_opf = REQ_OP_READ;
Omar Sandovalc7333972020-04-16 14:46:14 -07002679 if (need_validation)
Omar Sandoval77d5d682020-04-16 14:46:25 -07002680 repair_bio->bi_opf |= REQ_FAILFAST_DEV;
2681 repair_bio->bi_end_io = failed_bio->bi_end_io;
2682 repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2683 repair_bio->bi_private = failed_bio->bi_private;
Miao Xie2fe63032014-09-12 18:43:59 +08002684
Omar Sandoval77d5d682020-04-16 14:46:25 -07002685 if (failed_io_bio->csum) {
David Sterba223486c2020-07-02 11:27:30 +02002686 const u32 csum_size = fs_info->csum_size;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002687
2688 repair_io_bio->csum = repair_io_bio->csum_inline;
2689 memcpy(repair_io_bio->csum,
2690 failed_io_bio->csum + csum_size * icsum, csum_size);
2691 }
2692
2693 bio_add_page(repair_bio, page, failrec->len, pgoff);
2694 repair_io_bio->logical = failrec->start;
2695 repair_io_bio->iter = repair_bio->bi_iter;
Miao Xie2fe63032014-09-12 18:43:59 +08002696
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002697 btrfs_debug(btrfs_sb(inode->i_sb),
Omar Sandoval77d5d682020-04-16 14:46:25 -07002698"repair read error: submitting new read to mirror %d, in_validation=%d",
2699 failrec->this_mirror, failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002700
Omar Sandoval77d5d682020-04-16 14:46:25 -07002701 status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2702 failrec->bio_flags);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002703 if (status) {
Josef Bacik7870d082017-05-05 11:57:15 -04002704 free_io_failure(failure_tree, tree, failrec);
Omar Sandoval77d5d682020-04-16 14:46:25 -07002705 bio_put(repair_bio);
Miao Xie6c387ab2014-09-12 18:43:57 +08002706 }
Omar Sandoval77d5d682020-04-16 14:46:25 -07002707 return status;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002708}
2709
Chris Masond1310b22008-01-24 16:13:08 -05002710/* lots and lots of room for performance fixes in the end_bio funcs */
2711
David Sterbab5227c02015-12-03 13:08:59 +01002712void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
Jeff Mahoney87826df2012-02-15 16:23:57 +01002713{
2714 int uptodate = (err == 0);
Eric Sandeen3e2426b2014-06-12 00:39:58 -05002715 int ret = 0;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002716
Nikolay Borisovc6297322018-11-08 10:18:08 +02002717 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002718
Jeff Mahoney87826df2012-02-15 16:23:57 +01002719 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002720 ClearPageUptodate(page);
2721 SetPageError(page);
Colin Ian Kingbff5baf2017-05-09 18:14:01 +01002722 ret = err < 0 ? err : -EIO;
Liu Bo5dca6ee2014-05-12 12:47:36 +08002723 mapping_set_error(page->mapping, ret);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002724 }
Jeff Mahoney87826df2012-02-15 16:23:57 +01002725}
2726
Chris Masond1310b22008-01-24 16:13:08 -05002727/*
2728 * after a writepage IO is done, we need to:
2729 * clear the uptodate bits on error
2730 * clear the writeback bits in the extent tree for this IO
2731 * end_page_writeback if the page has no more pending IO
2732 *
2733 * Scheduling is not allowed, so the extent state tree is expected
2734 * to have one and only one object corresponding to this IO.
2735 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002736static void end_bio_extent_writepage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002737{
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002738 int error = blk_status_to_errno(bio->bi_status);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002739 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002740 u64 start;
2741 u64 end;
Ming Lei6dc4f102019-02-15 19:13:19 +08002742 struct bvec_iter_all iter_all;
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09002743 bool first_bvec = true;
Chris Masond1310b22008-01-24 16:13:08 -05002744
David Sterbac09abff2017-07-13 18:10:07 +02002745 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002746 bio_for_each_segment_all(bvec, bio, iter_all) {
Chris Masond1310b22008-01-24 16:13:08 -05002747 struct page *page = bvec->bv_page;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002748 struct inode *inode = page->mapping->host;
2749 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
David Woodhouse902b22f2008-08-20 08:51:49 -04002750
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002751 /* We always issue full-page reads, but if some block
2752 * in a page fails to read, blk_update_request() will
2753 * advance bv_offset and adjust bv_len to compensate.
2754 * Print a warning for nonzero offsets, and an error
2755 * if they don't add up to a full page. */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002756 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2757 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002758 btrfs_err(fs_info,
Frank Holtonefe120a2013-12-20 11:37:06 -05002759 "partial page write in btrfs with offset %u and length %u",
2760 bvec->bv_offset, bvec->bv_len);
2761 else
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002762 btrfs_info(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04002763 "incomplete page write in btrfs with offset %u and length %u",
Frank Holtonefe120a2013-12-20 11:37:06 -05002764 bvec->bv_offset, bvec->bv_len);
2765 }
Chris Masond1310b22008-01-24 16:13:08 -05002766
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002767 start = page_offset(page);
2768 end = start + bvec->bv_offset + bvec->bv_len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002769
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09002770 if (first_bvec) {
2771 btrfs_record_physical_zoned(inode, start, bio);
2772 first_bvec = false;
2773 }
2774
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002775 end_extent_writepage(page, error, start, end);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002776 end_page_writeback(page);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002777 }
Chris Mason2b1f55b2008-09-24 11:48:04 -04002778
Chris Masond1310b22008-01-24 16:13:08 -05002779 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002780}
2781
Qu Wenruo94e8c952020-11-13 20:51:28 +08002782/*
2783 * Record previously processed extent range
2784 *
2785 * For endio_readpage_release_extent() to handle a full extent range, reducing
2786 * the extent io operations.
2787 */
2788struct processed_extent {
2789 struct btrfs_inode *inode;
2790 /* Start of the range in @inode */
2791 u64 start;
Nigel Christian2e626e52021-01-24 20:41:41 -05002792 /* End of the range in @inode */
Qu Wenruo94e8c952020-11-13 20:51:28 +08002793 u64 end;
2794 bool uptodate;
2795};
2796
2797/*
2798 * Try to release processed extent range
2799 *
2800 * May not release the extent range right now if the current range is
2801 * contiguous to processed extent.
2802 *
2803 * Will release processed extent when any of @inode, @uptodate, the range is
2804 * no longer contiguous to the processed range.
2805 *
2806 * Passing @inode == NULL will force processed extent to be released.
2807 */
2808static void endio_readpage_release_extent(struct processed_extent *processed,
2809 struct btrfs_inode *inode, u64 start, u64 end,
2810 bool uptodate)
Miao Xie883d0de2013-07-25 19:22:35 +08002811{
2812 struct extent_state *cached = NULL;
Qu Wenruo94e8c952020-11-13 20:51:28 +08002813 struct extent_io_tree *tree;
Miao Xie883d0de2013-07-25 19:22:35 +08002814
Qu Wenruo94e8c952020-11-13 20:51:28 +08002815 /* The first extent, initialize @processed */
2816 if (!processed->inode)
2817 goto update;
2818
2819 /*
2820 * Contiguous to processed extent, just uptodate the end.
2821 *
2822 * Several things to notice:
2823 *
2824 * - bio can be merged as long as on-disk bytenr is contiguous
2825 * This means we can have page belonging to other inodes, thus need to
2826 * check if the inode still matches.
2827 * - bvec can contain range beyond current page for multi-page bvec
2828 * Thus we need to do processed->end + 1 >= start check
2829 */
2830 if (processed->inode == inode && processed->uptodate == uptodate &&
2831 processed->end + 1 >= start && end >= processed->end) {
2832 processed->end = end;
2833 return;
2834 }
2835
2836 tree = &processed->inode->io_tree;
2837 /*
2838 * Now we don't have range contiguous to the processed range, release
2839 * the processed range now.
2840 */
2841 if (processed->uptodate && tree->track_uptodate)
2842 set_extent_uptodate(tree, processed->start, processed->end,
2843 &cached, GFP_ATOMIC);
2844 unlock_extent_cached_atomic(tree, processed->start, processed->end,
2845 &cached);
2846
2847update:
2848 /* Update processed to current range */
2849 processed->inode = inode;
2850 processed->start = start;
2851 processed->end = end;
2852 processed->uptodate = uptodate;
Miao Xie883d0de2013-07-25 19:22:35 +08002853}
2854
Qu Wenruo92082d42021-02-02 10:28:36 +08002855static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
2856{
2857 ASSERT(PageLocked(page));
2858 if (fs_info->sectorsize == PAGE_SIZE)
2859 return;
2860
2861 ASSERT(PagePrivate(page));
2862 btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
2863}
2864
2865static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
Qu Wenruoe09caaf2020-11-13 20:51:29 +08002866{
Qu Wenruo4325cb22021-01-26 16:33:58 +08002867 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2868
2869 ASSERT(page_offset(page) <= start &&
2870 start + len <= page_offset(page) + PAGE_SIZE);
2871
Qu Wenruoe09caaf2020-11-13 20:51:29 +08002872 if (uptodate) {
Qu Wenruo4325cb22021-01-26 16:33:58 +08002873 btrfs_page_set_uptodate(fs_info, page, start, len);
Qu Wenruoe09caaf2020-11-13 20:51:29 +08002874 } else {
Qu Wenruo4325cb22021-01-26 16:33:58 +08002875 btrfs_page_clear_uptodate(fs_info, page, start, len);
2876 btrfs_page_set_error(fs_info, page, start, len);
Qu Wenruoe09caaf2020-11-13 20:51:29 +08002877 }
Qu Wenruo4325cb22021-01-26 16:33:58 +08002878
2879 if (fs_info->sectorsize == PAGE_SIZE)
2880 unlock_page(page);
Qu Wenruo92082d42021-02-02 10:28:36 +08002881 else if (is_data_inode(page->mapping->host))
2882 /*
2883 * For subpage data, unlock the page if we're the last reader.
2884 * For subpage metadata, page lock is not utilized for read.
2885 */
2886 btrfs_subpage_end_reader(fs_info, page, start, len);
Qu Wenruoe09caaf2020-11-13 20:51:29 +08002887}
2888
Chris Masond1310b22008-01-24 16:13:08 -05002889/*
Qu Wenruod9bb77d2021-03-15 13:39:14 +08002890 * Find extent buffer for a givne bytenr.
2891 *
2892 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
2893 * in endio context.
2894 */
2895static struct extent_buffer *find_extent_buffer_readpage(
2896 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
2897{
2898 struct extent_buffer *eb;
2899
2900 /*
2901 * For regular sectorsize, we can use page->private to grab extent
2902 * buffer
2903 */
2904 if (fs_info->sectorsize == PAGE_SIZE) {
2905 ASSERT(PagePrivate(page) && page->private);
2906 return (struct extent_buffer *)page->private;
2907 }
2908
2909 /* For subpage case, we need to lookup buffer radix tree */
2910 rcu_read_lock();
2911 eb = radix_tree_lookup(&fs_info->buffer_radix,
2912 bytenr >> fs_info->sectorsize_bits);
2913 rcu_read_unlock();
2914 ASSERT(eb);
2915 return eb;
2916}
2917
2918/*
Chris Masond1310b22008-01-24 16:13:08 -05002919 * after a readpage IO is done, we need to:
2920 * clear the uptodate bits on error
2921 * set the uptodate bits if things worked
2922 * set the page up to date if all extents in the tree are uptodate
2923 * clear the lock bit in the extent tree
2924 * unlock the page if there are no other extents locked for it
2925 *
2926 * Scheduling is not allowed, so the extent state tree is expected
2927 * to have one and only one object corresponding to this IO.
2928 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002929static void end_bio_extent_readpage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002930{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002931 struct bio_vec *bvec;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002932 int uptodate = !bio->bi_status;
Miao Xiefacc8a222013-07-25 19:22:34 +08002933 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
Josef Bacik7870d082017-05-05 11:57:15 -04002934 struct extent_io_tree *tree, *failure_tree;
Qu Wenruo94e8c952020-11-13 20:51:28 +08002935 struct processed_extent processed = { 0 };
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002936 /*
2937 * The offset to the beginning of a bio, since one bio can never be
2938 * larger than UINT_MAX, u32 here is enough.
2939 */
2940 u32 bio_offset = 0;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002941 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002942 int ret;
Ming Lei6dc4f102019-02-15 19:13:19 +08002943 struct bvec_iter_all iter_all;
Chris Masond1310b22008-01-24 16:13:08 -05002944
David Sterbac09abff2017-07-13 18:10:07 +02002945 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002946 bio_for_each_segment_all(bvec, bio, iter_all) {
Chris Masond1310b22008-01-24 16:13:08 -05002947 struct page *page = bvec->bv_page;
Josef Bacika71754f2013-06-17 17:14:39 -04002948 struct inode *inode = page->mapping->host;
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002949 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002950 const u32 sectorsize = fs_info->sectorsize;
2951 u64 start;
2952 u64 end;
2953 u32 len;
Arne Jansen507903b2011-04-06 10:02:20 +00002954
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002955 btrfs_debug(fs_info,
2956 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
David Sterba1201b582020-11-26 15:41:27 +01002957 bio->bi_iter.bi_sector, bio->bi_status,
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002958 io_bio->mirror_num);
Josef Bacika71754f2013-06-17 17:14:39 -04002959 tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002960 failure_tree = &BTRFS_I(inode)->io_failure_tree;
David Woodhouse902b22f2008-08-20 08:51:49 -04002961
Qu Wenruo8b8bbd42020-10-21 14:24:58 +08002962 /*
2963 * We always issue full-sector reads, but if some block in a
2964 * page fails to read, blk_update_request() will advance
2965 * bv_offset and adjust bv_len to compensate. Print a warning
2966 * for unaligned offsets, and an error if they don't add up to
2967 * a full sector.
2968 */
2969 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
2970 btrfs_err(fs_info,
2971 "partial page read in btrfs with offset %u and length %u",
2972 bvec->bv_offset, bvec->bv_len);
2973 else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
2974 sectorsize))
2975 btrfs_info(fs_info,
2976 "incomplete page read with offset %u and length %u",
2977 bvec->bv_offset, bvec->bv_len);
Chris Masond1310b22008-01-24 16:13:08 -05002978
Qu Wenruo8b8bbd42020-10-21 14:24:58 +08002979 start = page_offset(page) + bvec->bv_offset;
2980 end = start + bvec->bv_len - 1;
Miao Xiefacc8a222013-07-25 19:22:34 +08002981 len = bvec->bv_len;
Chris Masond1310b22008-01-24 16:13:08 -05002982
Chris Mason9be33952013-05-17 18:30:14 -04002983 mirror = io_bio->mirror_num;
Nikolay Borisov78e62c02018-11-22 10:17:49 +02002984 if (likely(uptodate)) {
Nikolay Borisovbe17b3a2020-09-18 16:34:36 +03002985 if (is_data_inode(inode))
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002986 ret = btrfs_verify_data_csum(io_bio,
Goldwyn Rodrigues5e295762021-03-03 06:55:37 -06002987 bio_offset, page, start, end);
Nikolay Borisov9a446d62020-09-18 16:34:33 +03002988 else
2989 ret = btrfs_validate_metadata_buffer(io_bio,
Qu Wenruo8e1dc982020-11-12 16:47:57 +08002990 page, start, end, mirror);
Stefan Behrens5ee08442012-08-27 08:30:03 -06002991 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002992 uptodate = 0;
Stefan Behrens5ee08442012-08-27 08:30:03 -06002993 else
Josef Bacik7870d082017-05-05 11:57:15 -04002994 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2995 failure_tree, tree, start,
2996 page,
2997 btrfs_ino(BTRFS_I(inode)), 0);
Chris Masond1310b22008-01-24 16:13:08 -05002998 }
Josef Bacikea466792012-03-26 21:57:36 -04002999
Miao Xief2a09da2013-07-25 19:22:33 +08003000 if (likely(uptodate))
3001 goto readpage_ok;
3002
Nikolay Borisovbe17b3a2020-09-18 16:34:36 +03003003 if (is_data_inode(inode)) {
Liu Bo9d0d1c82017-03-24 15:04:50 -07003004
3005 /*
Nikolay Borisov78e62c02018-11-22 10:17:49 +02003006 * The generic bio_readpage_error handles errors the
3007 * following way: If possible, new read requests are
3008 * created and submitted and will end up in
3009 * end_bio_extent_readpage as well (if we're lucky,
3010 * not in the !uptodate case). In that case it returns
3011 * 0 and we just go on with the next page in our bio.
3012 * If it can't handle the error it will return -EIO and
3013 * we remain responsible for that page.
Liu Bo9d0d1c82017-03-24 15:04:50 -07003014 */
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08003015 if (!btrfs_submit_read_repair(inode, bio, bio_offset,
3016 page,
Omar Sandoval77d5d682020-04-16 14:46:25 -07003017 start - page_offset(page),
3018 start, end, mirror,
Nikolay Borisov908930f2020-09-18 16:34:37 +03003019 btrfs_submit_data_bio)) {
Nikolay Borisov78e62c02018-11-22 10:17:49 +02003020 uptodate = !bio->bi_status;
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08003021 ASSERT(bio_offset + len > bio_offset);
3022 bio_offset += len;
Nikolay Borisov78e62c02018-11-22 10:17:49 +02003023 continue;
3024 }
3025 } else {
3026 struct extent_buffer *eb;
3027
Qu Wenruod9bb77d2021-03-15 13:39:14 +08003028 eb = find_extent_buffer_readpage(fs_info, page, start);
Nikolay Borisov78e62c02018-11-22 10:17:49 +02003029 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3030 eb->read_mirror = mirror;
3031 atomic_dec(&eb->io_pages);
3032 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
3033 &eb->bflags))
3034 btree_readahead_hook(eb, -EIO);
Chris Mason7e383262008-04-09 16:28:12 -04003035 }
Miao Xief2a09da2013-07-25 19:22:33 +08003036readpage_ok:
Miao Xie883d0de2013-07-25 19:22:35 +08003037 if (likely(uptodate)) {
Josef Bacika71754f2013-06-17 17:14:39 -04003038 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003039 pgoff_t end_index = i_size >> PAGE_SHIFT;
Josef Bacika71754f2013-06-17 17:14:39 -04003040
Qu Wenruoc28ea612021-03-01 16:44:22 +08003041 /*
3042 * Zero out the remaining part if this range straddles
3043 * i_size.
3044 *
3045 * Here we should only zero the range inside the bvec,
3046 * not touch anything else.
3047 *
3048 * NOTE: i_size is exclusive while end is inclusive.
3049 */
3050 if (page->index == end_index && i_size <= end) {
3051 u32 zero_start = max(offset_in_page(i_size),
Qu Wenruod2dcc8e2021-03-08 17:20:17 +08003052 offset_in_page(start));
Qu Wenruoc28ea612021-03-01 16:44:22 +08003053
3054 zero_user_segment(page, zero_start,
3055 offset_in_page(end) + 1);
3056 }
Chris Mason70dec802008-01-29 09:59:12 -05003057 }
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08003058 ASSERT(bio_offset + len > bio_offset);
3059 bio_offset += len;
Miao Xie883d0de2013-07-25 19:22:35 +08003060
Qu Wenruoe09caaf2020-11-13 20:51:29 +08003061 /* Update page status and unlock */
Qu Wenruo92082d42021-02-02 10:28:36 +08003062 end_page_read(page, uptodate, start, len);
Qu Wenruo94e8c952020-11-13 20:51:28 +08003063 endio_readpage_release_extent(&processed, BTRFS_I(inode),
3064 start, end, uptodate);
Kent Overstreet2c30c712013-11-07 12:20:26 -08003065 }
Qu Wenruo94e8c952020-11-13 20:51:28 +08003066 /* Release the last extent */
3067 endio_readpage_release_extent(&processed, NULL, 0, 0, false);
David Sterbab3a0dd52018-11-22 17:16:49 +01003068 btrfs_io_bio_free_csum(io_bio);
Chris Masond1310b22008-01-24 16:13:08 -05003069 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05003070}
3071
Chris Mason9be33952013-05-17 18:30:14 -04003072/*
David Sterba184f9992017-06-12 17:29:39 +02003073 * Initialize the members up to but not including 'bio'. Use after allocating a
3074 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
3075 * 'bio' because use of __GFP_ZERO is not supported.
Chris Mason9be33952013-05-17 18:30:14 -04003076 */
David Sterba184f9992017-06-12 17:29:39 +02003077static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
Chris Masond1310b22008-01-24 16:13:08 -05003078{
David Sterba184f9992017-06-12 17:29:39 +02003079 memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
3080}
3081
3082/*
David Sterba6e707bc2017-06-02 17:26:26 +02003083 * The following helpers allocate a bio. As it's backed by a bioset, it'll
3084 * never fail. We're returning a bio right now but you can call btrfs_io_bio
3085 * for the appropriate container_of magic
Chris Masond1310b22008-01-24 16:13:08 -05003086 */
David Sterbae749af442019-06-18 20:00:16 +02003087struct bio *btrfs_bio_alloc(u64 first_byte)
Chris Masond1310b22008-01-24 16:13:08 -05003088{
3089 struct bio *bio;
3090
Christoph Hellwiga8affc02021-03-11 12:01:37 +01003091 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &btrfs_bioset);
David Sterbac821e7f32017-06-02 18:35:36 +02003092 bio->bi_iter.bi_sector = first_byte >> 9;
David Sterba184f9992017-06-12 17:29:39 +02003093 btrfs_io_bio_init(btrfs_io_bio(bio));
Chris Masond1310b22008-01-24 16:13:08 -05003094 return bio;
3095}
3096
David Sterba8b6c1d52017-06-02 17:48:13 +02003097struct bio *btrfs_bio_clone(struct bio *bio)
Chris Mason9be33952013-05-17 18:30:14 -04003098{
Miao Xie23ea8e52014-09-12 18:43:54 +08003099 struct btrfs_io_bio *btrfs_bio;
3100 struct bio *new;
Chris Mason9be33952013-05-17 18:30:14 -04003101
David Sterba6e707bc2017-06-02 17:26:26 +02003102 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04003103 new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
David Sterba6e707bc2017-06-02 17:26:26 +02003104 btrfs_bio = btrfs_io_bio(new);
David Sterba184f9992017-06-12 17:29:39 +02003105 btrfs_io_bio_init(btrfs_bio);
David Sterba6e707bc2017-06-02 17:26:26 +02003106 btrfs_bio->iter = bio->bi_iter;
Miao Xie23ea8e52014-09-12 18:43:54 +08003107 return new;
3108}
Chris Mason9be33952013-05-17 18:30:14 -04003109
David Sterbac5e4c3d2017-06-12 17:29:41 +02003110struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
Chris Mason9be33952013-05-17 18:30:14 -04003111{
Miao Xiefacc8a222013-07-25 19:22:34 +08003112 struct bio *bio;
3113
David Sterba6e707bc2017-06-02 17:26:26 +02003114 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04003115 bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
David Sterba184f9992017-06-12 17:29:39 +02003116 btrfs_io_bio_init(btrfs_io_bio(bio));
Miao Xiefacc8a222013-07-25 19:22:34 +08003117 return bio;
Chris Mason9be33952013-05-17 18:30:14 -04003118}
3119
Liu Boe4770942017-05-16 10:57:14 -07003120struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
Liu Bo2f8e9142017-05-15 17:43:31 -07003121{
3122 struct bio *bio;
3123 struct btrfs_io_bio *btrfs_bio;
3124
3125 /* this will never fail when it's backed by a bioset */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04003126 bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
Liu Bo2f8e9142017-05-15 17:43:31 -07003127 ASSERT(bio);
3128
3129 btrfs_bio = btrfs_io_bio(bio);
David Sterba184f9992017-06-12 17:29:39 +02003130 btrfs_io_bio_init(btrfs_bio);
Liu Bo2f8e9142017-05-15 17:43:31 -07003131
3132 bio_trim(bio, offset >> 9, size >> 9);
Liu Bo17347ce2017-05-15 15:33:27 -07003133 btrfs_bio->iter = bio->bi_iter;
Liu Bo2f8e9142017-05-15 17:43:31 -07003134 return bio;
3135}
Chris Mason9be33952013-05-17 18:30:14 -04003136
Naohiro Aota953651e2021-02-04 19:21:57 +09003137/**
3138 * Attempt to add a page to bio
3139 *
3140 * @bio: destination bio
3141 * @page: page to add to the bio
3142 * @disk_bytenr: offset of the new bio or to check whether we are adding
3143 * a contiguous page to the previous one
3144 * @pg_offset: starting offset in the page
3145 * @size: portion of page that we want to write
3146 * @prev_bio_flags: flags of previous bio to see if we can merge the current one
3147 * @bio_flags: flags of the current bio to see if we can merge them
3148 * @return: true if page was added, false otherwise
3149 *
3150 * Attempt to add a page to bio considering stripe alignment etc.
3151 *
3152 * Return true if successfully page added. Otherwise, return false.
3153 */
3154static bool btrfs_bio_add_page(struct bio *bio, struct page *page,
3155 u64 disk_bytenr, unsigned int size,
3156 unsigned int pg_offset,
3157 unsigned long prev_bio_flags,
3158 unsigned long bio_flags)
3159{
3160 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
3161 bool contig;
Naohiro Aotae1326f02021-02-04 19:21:58 +09003162 int ret;
Naohiro Aota953651e2021-02-04 19:21:57 +09003163
3164 if (prev_bio_flags != bio_flags)
3165 return false;
3166
3167 if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
3168 contig = bio->bi_iter.bi_sector == sector;
3169 else
3170 contig = bio_end_sector(bio) == sector;
3171 if (!contig)
3172 return false;
3173
3174 if (btrfs_bio_fits_in_stripe(page, size, bio, bio_flags))
3175 return false;
3176
Johannes Thumshirncacb2ce2021-02-04 19:22:01 +09003177 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
3178 struct page *first_page = bio_first_bvec_all(bio)->bv_page;
3179
3180 if (!btrfs_bio_fits_in_ordered_extent(first_page, bio, size))
3181 return false;
Naohiro Aotae1326f02021-02-04 19:21:58 +09003182 ret = bio_add_zone_append_page(bio, page, size, pg_offset);
Johannes Thumshirncacb2ce2021-02-04 19:22:01 +09003183 } else {
Naohiro Aotae1326f02021-02-04 19:21:58 +09003184 ret = bio_add_page(bio, page, size, pg_offset);
Johannes Thumshirncacb2ce2021-02-04 19:22:01 +09003185 }
Naohiro Aotae1326f02021-02-04 19:21:58 +09003186
3187 return ret == size;
Naohiro Aota953651e2021-02-04 19:21:57 +09003188}
3189
David Sterba4b81ba42017-06-06 19:14:26 +02003190/*
3191 * @opf: bio REQ_OP_* and REQ_* flags as one value
David Sterbab8b3d622017-06-12 19:50:41 +02003192 * @wbc: optional writeback control for io accounting
3193 * @page: page to add to the bio
Qu Wenruo0c64c332021-01-06 09:01:40 +08003194 * @disk_bytenr: logical bytenr where the write will be
3195 * @size: portion of page that we want to write to
David Sterbab8b3d622017-06-12 19:50:41 +02003196 * @pg_offset: offset of the new bio or to check whether we are adding
3197 * a contiguous page to the previous one
David Sterba5c2b1fd2017-06-06 19:22:55 +02003198 * @bio_ret: must be valid pointer, newly allocated bio will be stored there
David Sterbab8b3d622017-06-12 19:50:41 +02003199 * @end_io_func: end_io callback for new bio
3200 * @mirror_num: desired mirror to read/write
3201 * @prev_bio_flags: flags of previous bio to see if we can merge the current one
3202 * @bio_flags: flags of the current bio to see if we can merge them
David Sterba4b81ba42017-06-06 19:14:26 +02003203 */
David Sterba0ceb34b2020-02-05 19:09:28 +01003204static int submit_extent_page(unsigned int opf,
Chris Masonda2f0f72015-07-02 13:57:22 -07003205 struct writeback_control *wbc,
Qu Wenruo0c64c332021-01-06 09:01:40 +08003206 struct page *page, u64 disk_bytenr,
David Sterba6c5a4e22017-10-04 17:10:34 +02003207 size_t size, unsigned long pg_offset,
Chris Masond1310b22008-01-24 16:13:08 -05003208 struct bio **bio_ret,
Chris Masonf1885912008-04-09 16:28:12 -04003209 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04003210 int mirror_num,
3211 unsigned long prev_bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003212 unsigned long bio_flags,
3213 bool force_bio_submit)
Chris Masond1310b22008-01-24 16:13:08 -05003214{
3215 int ret = 0;
3216 struct bio *bio;
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003217 size_t io_size = min_t(size_t, size, PAGE_SIZE);
Naohiro Aotae1326f02021-02-04 19:21:58 +09003218 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3219 struct extent_io_tree *tree = &inode->io_tree;
3220 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Chris Masond1310b22008-01-24 16:13:08 -05003221
David Sterba5c2b1fd2017-06-06 19:22:55 +02003222 ASSERT(bio_ret);
3223
3224 if (*bio_ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003225 bio = *bio_ret;
Naohiro Aota953651e2021-02-04 19:21:57 +09003226 if (force_bio_submit ||
3227 !btrfs_bio_add_page(bio, page, disk_bytenr, io_size,
3228 pg_offset, prev_bio_flags, bio_flags)) {
Mike Christie1f7ad752016-06-05 14:31:51 -05003229 ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
Naohiro Aota289454a2015-01-06 01:01:03 +09003230 if (ret < 0) {
3231 *bio_ret = NULL;
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003232 return ret;
Naohiro Aota289454a2015-01-06 01:01:03 +09003233 }
Chris Masond1310b22008-01-24 16:13:08 -05003234 bio = NULL;
3235 } else {
Chris Masonda2f0f72015-07-02 13:57:22 -07003236 if (wbc)
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003237 wbc_account_cgroup_owner(wbc, page, io_size);
Chris Masond1310b22008-01-24 16:13:08 -05003238 return 0;
3239 }
3240 }
Chris Masonc8b97812008-10-29 14:49:59 -04003241
Qu Wenruo0c64c332021-01-06 09:01:40 +08003242 bio = btrfs_bio_alloc(disk_bytenr);
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003243 bio_add_page(bio, page, io_size, pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05003244 bio->bi_end_io = end_io_func;
3245 bio->bi_private = tree;
Jens Axboee6959b92017-06-27 11:51:28 -06003246 bio->bi_write_hint = page->mapping->host->i_write_hint;
David Sterba4b81ba42017-06-06 19:14:26 +02003247 bio->bi_opf = opf;
Chris Masonda2f0f72015-07-02 13:57:22 -07003248 if (wbc) {
David Sterba429aebc2019-11-18 23:27:55 +01003249 struct block_device *bdev;
3250
Naohiro Aotae1326f02021-02-04 19:21:58 +09003251 bdev = fs_info->fs_devices->latest_bdev;
David Sterba429aebc2019-11-18 23:27:55 +01003252 bio_set_dev(bio, bdev);
Chris Masonda2f0f72015-07-02 13:57:22 -07003253 wbc_init_bio(wbc, bio);
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003254 wbc_account_cgroup_owner(wbc, page, io_size);
Chris Masonda2f0f72015-07-02 13:57:22 -07003255 }
Naohiro Aotae1326f02021-02-04 19:21:58 +09003256 if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
3257 struct extent_map *em;
3258 struct map_lookup *map;
3259
3260 em = btrfs_get_chunk_map(fs_info, disk_bytenr, io_size);
3261 if (IS_ERR(em))
3262 return PTR_ERR(em);
3263
3264 map = em->map_lookup;
3265 /* We only support single profile for now */
3266 ASSERT(map->num_stripes == 1);
3267 btrfs_io_bio(bio)->device = map->stripes[0].dev;
3268
3269 free_extent_map(em);
3270 }
Chris Mason70dec802008-01-29 09:59:12 -05003271
David Sterba5c2b1fd2017-06-06 19:22:55 +02003272 *bio_ret = bio;
Chris Masond1310b22008-01-24 16:13:08 -05003273
3274 return ret;
3275}
3276
Qu Wenruo760f9912021-01-26 16:33:48 +08003277static int attach_extent_buffer_page(struct extent_buffer *eb,
3278 struct page *page,
3279 struct btrfs_subpage *prealloc)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003280{
Qu Wenruo760f9912021-01-26 16:33:48 +08003281 struct btrfs_fs_info *fs_info = eb->fs_info;
3282 int ret = 0;
3283
Qu Wenruo0d01e242020-10-21 14:25:02 +08003284 /*
3285 * If the page is mapped to btree inode, we should hold the private
3286 * lock to prevent race.
3287 * For cloned or dummy extent buffers, their pages are not mapped and
3288 * will not race with any other ebs.
3289 */
3290 if (page->mapping)
3291 lockdep_assert_held(&page->mapping->private_lock);
3292
Qu Wenruo760f9912021-01-26 16:33:48 +08003293 if (fs_info->sectorsize == PAGE_SIZE) {
3294 if (!PagePrivate(page))
3295 attach_page_private(page, eb);
3296 else
3297 WARN_ON(page->private != (unsigned long)eb);
3298 return 0;
3299 }
3300
3301 /* Already mapped, just free prealloc */
3302 if (PagePrivate(page)) {
3303 btrfs_free_subpage(prealloc);
3304 return 0;
3305 }
3306
3307 if (prealloc)
3308 /* Has preallocated memory for subpage */
3309 attach_page_private(page, prealloc);
Guoqing Jiangd1b89bc2020-06-01 21:47:45 -07003310 else
Qu Wenruo760f9912021-01-26 16:33:48 +08003311 /* Do new allocation to attach subpage */
3312 ret = btrfs_attach_subpage(fs_info, page,
3313 BTRFS_SUBPAGE_METADATA);
3314 return ret;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003315}
3316
Qu Wenruo32443de2021-01-26 16:34:00 +08003317int set_page_extent_mapped(struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05003318{
Qu Wenruo32443de2021-01-26 16:34:00 +08003319 struct btrfs_fs_info *fs_info;
3320
3321 ASSERT(page->mapping);
3322
3323 if (PagePrivate(page))
3324 return 0;
3325
3326 fs_info = btrfs_sb(page->mapping->host->i_sb);
3327
3328 if (fs_info->sectorsize < PAGE_SIZE)
3329 return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
3330
3331 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3332 return 0;
3333}
3334
3335void clear_page_extent_mapped(struct page *page)
3336{
3337 struct btrfs_fs_info *fs_info;
3338
3339 ASSERT(page->mapping);
3340
Guoqing Jiangd1b89bc2020-06-01 21:47:45 -07003341 if (!PagePrivate(page))
Qu Wenruo32443de2021-01-26 16:34:00 +08003342 return;
3343
3344 fs_info = btrfs_sb(page->mapping->host->i_sb);
3345 if (fs_info->sectorsize < PAGE_SIZE)
3346 return btrfs_detach_subpage(fs_info, page);
3347
3348 detach_page_private(page);
Chris Masond1310b22008-01-24 16:13:08 -05003349}
3350
Miao Xie125bac012013-07-25 19:22:37 +08003351static struct extent_map *
3352__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
Nikolay Borisov1a5ee1e2020-09-14 12:37:06 +03003353 u64 start, u64 len, struct extent_map **em_cached)
Miao Xie125bac012013-07-25 19:22:37 +08003354{
3355 struct extent_map *em;
3356
3357 if (em_cached && *em_cached) {
3358 em = *em_cached;
Filipe Mananacbc0e922014-02-25 14:15:12 +00003359 if (extent_map_in_tree(em) && start >= em->start &&
Miao Xie125bac012013-07-25 19:22:37 +08003360 start < extent_map_end(em)) {
Elena Reshetova490b54d2017-03-03 10:55:12 +02003361 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003362 return em;
3363 }
3364
3365 free_extent_map(em);
3366 *em_cached = NULL;
3367 }
3368
Nikolay Borisov1a5ee1e2020-09-14 12:37:06 +03003369 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
Miao Xie125bac012013-07-25 19:22:37 +08003370 if (em_cached && !IS_ERR_OR_NULL(em)) {
3371 BUG_ON(*em_cached);
Elena Reshetova490b54d2017-03-03 10:55:12 +02003372 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003373 *em_cached = em;
3374 }
3375 return em;
3376}
Chris Masond1310b22008-01-24 16:13:08 -05003377/*
3378 * basic readpage implementation. Locked extent state structs are inserted
3379 * into the tree that are removed when the IO is done (by the end_io
3380 * handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003381 * XXX JDM: This needs looking at to ensure proper page locking
Liu Bobaf863b2016-07-11 10:39:07 -07003382 * return 0 on success, otherwise return error
Chris Masond1310b22008-01-24 16:13:08 -05003383 */
Nikolay Borisov0f208812020-09-14 14:39:16 +03003384int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3385 struct bio **bio, unsigned long *bio_flags,
3386 unsigned int read_flags, u64 *prev_em_start)
Chris Masond1310b22008-01-24 16:13:08 -05003387{
3388 struct inode *inode = page->mapping->host;
Qu Wenruo92082d42021-02-02 10:28:36 +08003389 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie4eee4fa2012-12-21 09:17:45 +00003390 u64 start = page_offset(page);
David Sterba8eec8292017-06-06 19:50:13 +02003391 const u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003392 u64 cur = start;
3393 u64 extent_offset;
3394 u64 last_byte = i_size_read(inode);
3395 u64 block_start;
3396 u64 cur_end;
Chris Masond1310b22008-01-24 16:13:08 -05003397 struct extent_map *em;
Liu Bobaf863b2016-07-11 10:39:07 -07003398 int ret = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003399 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02003400 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003401 size_t iosize;
3402 size_t blocksize = inode->i_sb->s_blocksize;
Filipe Manana7f042a82016-01-27 19:17:20 +00003403 unsigned long this_bio_flag = 0;
David Sterbaf657a312020-02-05 19:09:42 +01003404 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
David Sterbaae6957e2020-02-05 19:09:30 +01003405
Qu Wenruo32443de2021-01-26 16:34:00 +08003406 ret = set_page_extent_mapped(page);
3407 if (ret < 0) {
3408 unlock_extent(tree, start, end);
Qu Wenruo92082d42021-02-02 10:28:36 +08003409 btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
3410 unlock_page(page);
Qu Wenruo32443de2021-01-26 16:34:00 +08003411 goto out;
3412 }
Chris Masond1310b22008-01-24 16:13:08 -05003413
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003414 if (!PageUptodate(page)) {
3415 if (cleancache_get_page(page) == 0) {
3416 BUG_ON(blocksize != PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08003417 unlock_extent(tree, start, end);
Qu Wenruo92082d42021-02-02 10:28:36 +08003418 unlock_page(page);
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003419 goto out;
3420 }
3421 }
3422
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003423 if (page->index == last_byte >> PAGE_SHIFT) {
Chris Masonc8b97812008-10-29 14:49:59 -04003424 char *userpage;
Johannes Thumshirn70730172018-12-05 15:23:03 +01003425 size_t zero_offset = offset_in_page(last_byte);
Chris Masonc8b97812008-10-29 14:49:59 -04003426
3427 if (zero_offset) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003428 iosize = PAGE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08003429 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04003430 memset(userpage + zero_offset, 0, iosize);
3431 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003432 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04003433 }
3434 }
Qu Wenruo92082d42021-02-02 10:28:36 +08003435 begin_page_read(fs_info, page);
Chris Masond1310b22008-01-24 16:13:08 -05003436 while (cur <= end) {
Filipe Manana005efed2015-09-14 09:09:31 +01003437 bool force_bio_submit = false;
Qu Wenruo0c64c332021-01-06 09:01:40 +08003438 u64 disk_bytenr;
Josef Bacikc8f2f242013-02-11 11:33:00 -05003439
Chris Masond1310b22008-01-24 16:13:08 -05003440 if (cur >= last_byte) {
3441 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003442 struct extent_state *cached = NULL;
3443
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003444 iosize = PAGE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08003445 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003446 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003447 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003448 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003449 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003450 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003451 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003452 cur + iosize - 1, &cached);
Qu Wenruo92082d42021-02-02 10:28:36 +08003453 end_page_read(page, true, cur, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003454 break;
3455 }
Miao Xie125bac012013-07-25 19:22:37 +08003456 em = __get_extent_map(inode, page, pg_offset, cur,
Nikolay Borisov1a5ee1e2020-09-14 12:37:06 +03003457 end - cur + 1, em_cached);
David Sterbac7040052011-04-19 18:00:01 +02003458 if (IS_ERR_OR_NULL(em)) {
Filipe Manana7f042a82016-01-27 19:17:20 +00003459 unlock_extent(tree, cur, end);
Qu Wenruo92082d42021-02-02 10:28:36 +08003460 end_page_read(page, false, cur, end + 1 - cur);
Chris Masond1310b22008-01-24 16:13:08 -05003461 break;
3462 }
Chris Masond1310b22008-01-24 16:13:08 -05003463 extent_offset = cur - em->start;
3464 BUG_ON(extent_map_end(em) <= cur);
3465 BUG_ON(end < cur);
3466
Li Zefan261507a02010-12-17 14:21:50 +08003467 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Mark Fasheh4b384312013-08-06 11:42:50 -07003468 this_bio_flag |= EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08003469 extent_set_compress_type(&this_bio_flag,
3470 em->compress_type);
3471 }
Chris Masonc8b97812008-10-29 14:49:59 -04003472
Chris Masond1310b22008-01-24 16:13:08 -05003473 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3474 cur_end = min(extent_map_end(em) - 1, end);
Qu Wenruofda28322013-02-26 08:10:22 +00003475 iosize = ALIGN(iosize, blocksize);
Goldwyn Rodrigues949b3272020-09-15 10:41:40 -05003476 if (this_bio_flag & EXTENT_BIO_COMPRESSED)
Qu Wenruo0c64c332021-01-06 09:01:40 +08003477 disk_bytenr = em->block_start;
Goldwyn Rodrigues949b3272020-09-15 10:41:40 -05003478 else
Qu Wenruo0c64c332021-01-06 09:01:40 +08003479 disk_bytenr = em->block_start + extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003480 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04003481 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3482 block_start = EXTENT_MAP_HOLE;
Filipe Manana005efed2015-09-14 09:09:31 +01003483
3484 /*
3485 * If we have a file range that points to a compressed extent
Randy Dunlap260db432020-08-04 19:48:34 -07003486 * and it's followed by a consecutive file range that points
Filipe Manana005efed2015-09-14 09:09:31 +01003487 * to the same compressed extent (possibly with a different
3488 * offset and/or length, so it either points to the whole extent
3489 * or only part of it), we must make sure we do not submit a
3490 * single bio to populate the pages for the 2 ranges because
3491 * this makes the compressed extent read zero out the pages
3492 * belonging to the 2nd range. Imagine the following scenario:
3493 *
3494 * File layout
3495 * [0 - 8K] [8K - 24K]
3496 * | |
3497 * | |
3498 * points to extent X, points to extent X,
3499 * offset 4K, length of 8K offset 0, length 16K
3500 *
3501 * [extent X, compressed length = 4K uncompressed length = 16K]
3502 *
3503 * If the bio to read the compressed extent covers both ranges,
3504 * it will decompress extent X into the pages belonging to the
3505 * first range and then it will stop, zeroing out the remaining
3506 * pages that belong to the other range that points to extent X.
3507 * So here we make sure we submit 2 bios, one for the first
3508 * range and another one for the third range. Both will target
3509 * the same physical extent from disk, but we can't currently
3510 * make the compressed bio endio callback populate the pages
3511 * for both ranges because each compressed bio is tightly
3512 * coupled with a single extent map, and each range can have
3513 * an extent map with a different offset value relative to the
3514 * uncompressed data of our extent and different lengths. This
3515 * is a corner case so we prioritize correctness over
3516 * non-optimal behavior (submitting 2 bios for the same extent).
3517 */
3518 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3519 prev_em_start && *prev_em_start != (u64)-1 &&
Filipe Manana8e928212019-02-14 15:17:20 +00003520 *prev_em_start != em->start)
Filipe Manana005efed2015-09-14 09:09:31 +01003521 force_bio_submit = true;
3522
3523 if (prev_em_start)
Filipe Manana8e928212019-02-14 15:17:20 +00003524 *prev_em_start = em->start;
Filipe Manana005efed2015-09-14 09:09:31 +01003525
Chris Masond1310b22008-01-24 16:13:08 -05003526 free_extent_map(em);
3527 em = NULL;
3528
3529 /* we've found a hole, just zero and go on */
3530 if (block_start == EXTENT_MAP_HOLE) {
3531 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003532 struct extent_state *cached = NULL;
3533
Cong Wang7ac687d2011-11-25 23:14:28 +08003534 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003535 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003536 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003537 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003538
3539 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003540 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003541 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003542 cur + iosize - 1, &cached);
Qu Wenruo92082d42021-02-02 10:28:36 +08003543 end_page_read(page, true, cur, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003544 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003545 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003546 continue;
3547 }
3548 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04003549 if (test_range_bit(tree, cur, cur_end,
3550 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04003551 check_page_uptodate(tree, page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003552 unlock_extent(tree, cur, cur + iosize - 1);
Qu Wenruo92082d42021-02-02 10:28:36 +08003553 end_page_read(page, true, cur, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003554 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003555 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003556 continue;
3557 }
Chris Mason70dec802008-01-29 09:59:12 -05003558 /* we have an inline extent but it didn't get marked up
3559 * to date. Error out
3560 */
3561 if (block_start == EXTENT_MAP_INLINE) {
Filipe Manana7f042a82016-01-27 19:17:20 +00003562 unlock_extent(tree, cur, cur + iosize - 1);
Qu Wenruo92082d42021-02-02 10:28:36 +08003563 end_page_read(page, false, cur, iosize);
Chris Mason70dec802008-01-29 09:59:12 -05003564 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003565 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05003566 continue;
3567 }
Chris Masond1310b22008-01-24 16:13:08 -05003568
David Sterba0ceb34b2020-02-05 19:09:28 +01003569 ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
Qu Wenruo0c64c332021-01-06 09:01:40 +08003570 page, disk_bytenr, iosize,
David Sterbafa17ed02019-10-03 17:29:05 +02003571 pg_offset, bio,
Nikolay Borisovfd513002020-09-14 12:37:11 +03003572 end_bio_extent_readpage, 0,
Chris Masonc8b97812008-10-29 14:49:59 -04003573 *bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003574 this_bio_flag,
3575 force_bio_submit);
Josef Bacikc8f2f242013-02-11 11:33:00 -05003576 if (!ret) {
3577 nr++;
3578 *bio_flags = this_bio_flag;
3579 } else {
Filipe Manana7f042a82016-01-27 19:17:20 +00003580 unlock_extent(tree, cur, cur + iosize - 1);
Qu Wenruo92082d42021-02-02 10:28:36 +08003581 end_page_read(page, false, cur, iosize);
Liu Bobaf863b2016-07-11 10:39:07 -07003582 goto out;
Josef Bacikedd33c92012-10-05 16:40:32 -04003583 }
Chris Masond1310b22008-01-24 16:13:08 -05003584 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003585 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003586 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003587out:
Liu Bobaf863b2016-07-11 10:39:07 -07003588 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003589}
3590
David Sterbab6660e82020-02-05 19:09:40 +01003591static inline void contiguous_readpages(struct page *pages[], int nr_pages,
Miao Xie99740902013-07-25 19:22:36 +08003592 u64 start, u64 end,
Miao Xie125bac012013-07-25 19:22:37 +08003593 struct extent_map **em_cached,
Nikolay Borisovd3fac6b2017-10-24 11:50:39 +03003594 struct bio **bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05003595 unsigned long *bio_flags,
Filipe Manana808f80b2015-09-28 09:56:26 +01003596 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003597{
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003598 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
Miao Xie99740902013-07-25 19:22:36 +08003599 int index;
3600
David Sterbab272ae22020-02-05 19:09:33 +01003601 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003602
3603 for (index = 0; index < nr_pages; index++) {
Nikolay Borisov0f208812020-09-14 14:39:16 +03003604 btrfs_do_readpage(pages[index], em_cached, bio, bio_flags,
3605 REQ_RAHEAD, prev_em_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003606 put_page(pages[index]);
Miao Xie99740902013-07-25 19:22:36 +08003607 }
3608}
3609
David Sterba3d4b9492017-02-10 19:33:41 +01003610static void update_nr_written(struct writeback_control *wbc,
Liu Boa91326672016-03-07 16:56:21 -08003611 unsigned long nr_written)
Chris Mason11c83492009-04-20 15:50:09 -04003612{
3613 wbc->nr_to_write -= nr_written;
Chris Mason11c83492009-04-20 15:50:09 -04003614}
3615
Chris Masond1310b22008-01-24 16:13:08 -05003616/*
Chris Mason40f76582014-05-21 13:35:51 -07003617 * helper for __extent_writepage, doing all of the delayed allocation setup.
3618 *
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003619 * This returns 1 if btrfs_run_delalloc_range function did all the work required
Chris Mason40f76582014-05-21 13:35:51 -07003620 * to write the page (copy into inline extent). In this case the IO has
3621 * been started and the page is already unlocked.
3622 *
3623 * This returns 0 if all went well (page still locked)
3624 * This returns < 0 if there were errors (page still locked)
Chris Masond1310b22008-01-24 16:13:08 -05003625 */
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03003626static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
Nikolay Borisov8cc02372018-11-08 10:18:07 +02003627 struct page *page, struct writeback_control *wbc,
3628 u64 delalloc_start, unsigned long *nr_written)
Chris Masond1310b22008-01-24 16:13:08 -05003629{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003630 u64 page_end = delalloc_start + PAGE_SIZE - 1;
Lu Fengqi3522e902018-11-29 11:33:38 +08003631 bool found;
Chris Mason40f76582014-05-21 13:35:51 -07003632 u64 delalloc_to_write = 0;
3633 u64 delalloc_end = 0;
3634 int ret;
3635 int page_started = 0;
3636
Chris Mason40f76582014-05-21 13:35:51 -07003637
3638 while (delalloc_end < page_end) {
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03003639 found = find_lock_delalloc_range(&inode->vfs_inode, page,
Chris Mason40f76582014-05-21 13:35:51 -07003640 &delalloc_start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03003641 &delalloc_end);
Lu Fengqi3522e902018-11-29 11:33:38 +08003642 if (!found) {
Chris Mason40f76582014-05-21 13:35:51 -07003643 delalloc_start = delalloc_end + 1;
3644 continue;
3645 }
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03003646 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003647 delalloc_end, &page_started, nr_written, wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003648 if (ret) {
3649 SetPageError(page);
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003650 /*
3651 * btrfs_run_delalloc_range should return < 0 for error
3652 * but just in case, we use > 0 here meaning the IO is
3653 * started, so we don't want to return > 0 unless
3654 * things are going well.
Chris Mason40f76582014-05-21 13:35:51 -07003655 */
Nikolay Borisovb69d1ee2020-07-16 18:17:19 +03003656 return ret < 0 ? ret : -EIO;
Chris Mason40f76582014-05-21 13:35:51 -07003657 }
3658 /*
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003659 * delalloc_end is already one less than the total length, so
3660 * we don't subtract one from PAGE_SIZE
Chris Mason40f76582014-05-21 13:35:51 -07003661 */
3662 delalloc_to_write += (delalloc_end - delalloc_start +
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003663 PAGE_SIZE) >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003664 delalloc_start = delalloc_end + 1;
3665 }
3666 if (wbc->nr_to_write < delalloc_to_write) {
3667 int thresh = 8192;
3668
3669 if (delalloc_to_write < thresh * 2)
3670 thresh = delalloc_to_write;
3671 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3672 thresh);
3673 }
3674
3675 /* did the fill delalloc function already unlock and start
3676 * the IO?
3677 */
3678 if (page_started) {
3679 /*
3680 * we've unlocked the page, so we can't update
3681 * the mapping's writeback index, just update
3682 * nr_to_write.
3683 */
3684 wbc->nr_to_write -= *nr_written;
3685 return 1;
3686 }
3687
Nikolay Borisovb69d1ee2020-07-16 18:17:19 +03003688 return 0;
Chris Mason40f76582014-05-21 13:35:51 -07003689}
3690
3691/*
3692 * helper for __extent_writepage. This calls the writepage start hooks,
3693 * and does the loop to map the page into extents and bios.
3694 *
3695 * We return 1 if the IO is started and the page is unlocked,
3696 * 0 if all went well (page still locked)
3697 * < 0 if there were errors (page still locked)
3698 */
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003699static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
Chris Mason40f76582014-05-21 13:35:51 -07003700 struct page *page,
3701 struct writeback_control *wbc,
3702 struct extent_page_data *epd,
3703 loff_t i_size,
3704 unsigned long nr_written,
David Sterba57e5ffe2019-10-29 18:28:55 +01003705 int *nr_ret)
Chris Mason40f76582014-05-21 13:35:51 -07003706{
Qu Wenruo6bc56362021-01-06 09:01:41 +08003707 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003708 struct extent_io_tree *tree = &inode->io_tree;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003709 u64 start = page_offset(page);
Qu Wenruo6bc56362021-01-06 09:01:41 +08003710 u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003711 u64 cur = start;
3712 u64 extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003713 u64 block_start;
Chris Masond1310b22008-01-24 16:13:08 -05003714 struct extent_map *em;
Chris Mason40f76582014-05-21 13:35:51 -07003715 int ret = 0;
3716 int nr = 0;
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09003717 u32 opf = REQ_OP_WRITE;
David Sterba57e5ffe2019-10-29 18:28:55 +01003718 const unsigned int write_flags = wbc_to_write_flags(wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003719 bool compressed;
Chris Masond1310b22008-01-24 16:13:08 -05003720
Qu Wenruo6bc56362021-01-06 09:01:41 +08003721 ret = btrfs_writepage_cow_fixup(page, start, end);
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003722 if (ret) {
3723 /* Fixup worker will requeue */
Josef Bacik5ab58052020-01-21 11:51:43 -05003724 redirty_page_for_writepage(wbc, page);
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003725 update_nr_written(wbc, nr_written);
3726 unlock_page(page);
3727 return 1;
Chris Mason247e7432008-07-17 12:53:51 -04003728 }
3729
Chris Mason11c83492009-04-20 15:50:09 -04003730 /*
3731 * we don't want to touch the inode after unlocking the page,
3732 * so we update the mapping writeback index now
3733 */
David Sterba3d4b9492017-02-10 19:33:41 +01003734 update_nr_written(wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05003735
Chris Masond1310b22008-01-24 16:13:08 -05003736 while (cur <= end) {
Qu Wenruo0c64c332021-01-06 09:01:40 +08003737 u64 disk_bytenr;
Chris Mason40f76582014-05-21 13:35:51 -07003738 u64 em_end;
Qu Wenruo6bc56362021-01-06 09:01:41 +08003739 u32 iosize;
David Sterba58409ed2016-05-04 11:46:10 +02003740
Chris Mason40f76582014-05-21 13:35:51 -07003741 if (cur >= i_size) {
Qu Wenruo6bc56362021-01-06 09:01:41 +08003742 btrfs_writepage_endio_finish_ordered(page, cur, end, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003743 break;
3744 }
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003745 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
David Sterbac7040052011-04-19 18:00:01 +02003746 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003747 SetPageError(page);
Filipe Manana61391d52014-05-09 17:17:40 +01003748 ret = PTR_ERR_OR_ZERO(em);
Chris Masond1310b22008-01-24 16:13:08 -05003749 break;
3750 }
3751
3752 extent_offset = cur - em->start;
Chris Mason40f76582014-05-21 13:35:51 -07003753 em_end = extent_map_end(em);
Qu Wenruo6bc56362021-01-06 09:01:41 +08003754 ASSERT(cur <= em_end);
3755 ASSERT(cur < end);
3756 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
3757 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
Chris Masond1310b22008-01-24 16:13:08 -05003758 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003759 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Qu Wenruo6bc56362021-01-06 09:01:41 +08003760 disk_bytenr = em->block_start + extent_offset;
3761
3762 /* Note that em_end from extent_map_end() is exclusive */
3763 iosize = min(em_end, end + 1) - cur;
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09003764
3765 if (btrfs_use_zone_append(inode, em))
3766 opf = REQ_OP_ZONE_APPEND;
3767
Chris Masond1310b22008-01-24 16:13:08 -05003768 free_extent_map(em);
3769 em = NULL;
3770
Chris Masonc8b97812008-10-29 14:49:59 -04003771 /*
3772 * compressed and inline extents are written through other
3773 * paths in the FS
3774 */
3775 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05003776 block_start == EXTENT_MAP_INLINE) {
Omar Sandovalc8b04032019-12-02 17:34:24 -08003777 if (compressed)
Chris Masonc8b97812008-10-29 14:49:59 -04003778 nr++;
Omar Sandovalc8b04032019-12-02 17:34:24 -08003779 else
3780 btrfs_writepage_endio_finish_ordered(page, cur,
3781 cur + iosize - 1, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04003782 cur += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003783 continue;
3784 }
Chris Masonc8b97812008-10-29 14:49:59 -04003785
David Sterba5cdc84b2018-07-18 20:32:52 +02003786 btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
David Sterba58409ed2016-05-04 11:46:10 +02003787 if (!PageWriteback(page)) {
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003788 btrfs_err(inode->root->fs_info,
David Sterba58409ed2016-05-04 11:46:10 +02003789 "page %lu not writeback, cur %llu end %llu",
3790 page->index, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003791 }
David Sterba58409ed2016-05-04 11:46:10 +02003792
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09003793 ret = submit_extent_page(opf | write_flags, wbc, page,
3794 disk_bytenr, iosize,
Qu Wenruo6bc56362021-01-06 09:01:41 +08003795 cur - page_offset(page), &epd->bio,
David Sterba58409ed2016-05-04 11:46:10 +02003796 end_bio_extent_writepage,
3797 0, 0, 0, false);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003798 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003799 SetPageError(page);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003800 if (PageWriteback(page))
3801 end_page_writeback(page);
3802 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003803
Qu Wenruo6bc56362021-01-06 09:01:41 +08003804 cur += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003805 nr++;
3806 }
Chris Mason40f76582014-05-21 13:35:51 -07003807 *nr_ret = nr;
Chris Mason40f76582014-05-21 13:35:51 -07003808 return ret;
3809}
3810
3811/*
3812 * the writepage semantics are similar to regular writepage. extent
3813 * records are inserted to lock ranges in the tree, and as dirty areas
3814 * are found, they are marked writeback. Then the lock bits are removed
3815 * and the end_io handler clears the writeback ranges
Qu Wenruo30659762019-03-20 14:27:42 +08003816 *
3817 * Return 0 if everything goes well.
3818 * Return <0 for error.
Chris Mason40f76582014-05-21 13:35:51 -07003819 */
3820static int __extent_writepage(struct page *page, struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01003821 struct extent_page_data *epd)
Chris Mason40f76582014-05-21 13:35:51 -07003822{
3823 struct inode *inode = page->mapping->host;
Chris Mason40f76582014-05-21 13:35:51 -07003824 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003825 u64 page_end = start + PAGE_SIZE - 1;
Chris Mason40f76582014-05-21 13:35:51 -07003826 int ret;
3827 int nr = 0;
Omar Sandovaleb70d222019-12-02 17:34:20 -08003828 size_t pg_offset;
Chris Mason40f76582014-05-21 13:35:51 -07003829 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003830 unsigned long end_index = i_size >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003831 unsigned long nr_written = 0;
3832
Chris Mason40f76582014-05-21 13:35:51 -07003833 trace___extent_writepage(page, inode, wbc);
3834
3835 WARN_ON(!PageLocked(page));
3836
3837 ClearPageError(page);
3838
Johannes Thumshirn70730172018-12-05 15:23:03 +01003839 pg_offset = offset_in_page(i_size);
Chris Mason40f76582014-05-21 13:35:51 -07003840 if (page->index > end_index ||
3841 (page->index == end_index && !pg_offset)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003842 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
Chris Mason40f76582014-05-21 13:35:51 -07003843 unlock_page(page);
3844 return 0;
3845 }
3846
3847 if (page->index == end_index) {
3848 char *userpage;
3849
3850 userpage = kmap_atomic(page);
3851 memset(userpage + pg_offset, 0,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003852 PAGE_SIZE - pg_offset);
Chris Mason40f76582014-05-21 13:35:51 -07003853 kunmap_atomic(userpage);
3854 flush_dcache_page(page);
3855 }
3856
Qu Wenruo32443de2021-01-26 16:34:00 +08003857 ret = set_page_extent_mapped(page);
3858 if (ret < 0) {
3859 SetPageError(page);
3860 goto done;
3861 }
Chris Mason40f76582014-05-21 13:35:51 -07003862
Nikolay Borisov7789a552018-11-08 10:18:06 +02003863 if (!epd->extent_locked) {
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03003864 ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
3865 &nr_written);
Nikolay Borisov7789a552018-11-08 10:18:06 +02003866 if (ret == 1)
Omar Sandoval169d2c82019-12-02 17:34:21 -08003867 return 0;
Nikolay Borisov7789a552018-11-08 10:18:06 +02003868 if (ret)
3869 goto done;
3870 }
Chris Mason40f76582014-05-21 13:35:51 -07003871
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003872 ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
3873 nr_written, &nr);
Chris Mason40f76582014-05-21 13:35:51 -07003874 if (ret == 1)
Omar Sandoval169d2c82019-12-02 17:34:21 -08003875 return 0;
Chris Mason40f76582014-05-21 13:35:51 -07003876
3877done:
Chris Masond1310b22008-01-24 16:13:08 -05003878 if (nr == 0) {
3879 /* make sure the mapping tag for page dirty gets cleared */
3880 set_page_writeback(page);
3881 end_page_writeback(page);
3882 }
Filipe Manana61391d52014-05-09 17:17:40 +01003883 if (PageError(page)) {
3884 ret = ret < 0 ? ret : -EIO;
3885 end_extent_writepage(page, ret, start, page_end);
3886 }
Chris Masond1310b22008-01-24 16:13:08 -05003887 unlock_page(page);
Qu Wenruo30659762019-03-20 14:27:42 +08003888 ASSERT(ret <= 0);
Chris Mason40f76582014-05-21 13:35:51 -07003889 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003890}
3891
Josef Bacikfd8b2b62013-04-24 16:41:19 -04003892void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003893{
NeilBrown74316202014-07-07 15:16:04 +10003894 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3895 TASK_UNINTERRUPTIBLE);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003896}
3897
Filipe Manana18dfa712019-09-11 17:42:00 +01003898static void end_extent_buffer_writeback(struct extent_buffer *eb)
3899{
3900 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3901 smp_mb__after_atomic();
3902 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3903}
3904
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003905/*
Qu Wenruoa3efb2f2020-10-21 14:24:49 +08003906 * Lock extent buffer status and pages for writeback.
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003907 *
Qu Wenruoa3efb2f2020-10-21 14:24:49 +08003908 * May try to flush write bio if we can't get the lock.
3909 *
3910 * Return 0 if the extent buffer doesn't need to be submitted.
3911 * (E.g. the extent buffer is not dirty)
3912 * Return >0 is the extent buffer is submitted to bio.
3913 * Return <0 if something went wrong, no page is locked.
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003914 */
David Sterba9df76fb2019-03-20 11:21:41 +01003915static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
Chris Mason0e378df2014-05-19 20:55:27 -07003916 struct extent_page_data *epd)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003917{
David Sterba9df76fb2019-03-20 11:21:41 +01003918 struct btrfs_fs_info *fs_info = eb->fs_info;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003919 int i, num_pages, failed_page_nr;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003920 int flush = 0;
3921 int ret = 0;
3922
3923 if (!btrfs_try_tree_write_lock(eb)) {
Qu Wenruof4340622019-03-20 14:27:41 +08003924 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003925 if (ret < 0)
3926 return ret;
3927 flush = 1;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003928 btrfs_tree_lock(eb);
3929 }
3930
3931 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3932 btrfs_tree_unlock(eb);
3933 if (!epd->sync_io)
3934 return 0;
3935 if (!flush) {
Qu Wenruof4340622019-03-20 14:27:41 +08003936 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003937 if (ret < 0)
3938 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003939 flush = 1;
3940 }
Chris Masona098d8e82012-03-21 12:09:56 -04003941 while (1) {
3942 wait_on_extent_buffer_writeback(eb);
3943 btrfs_tree_lock(eb);
3944 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3945 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003946 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003947 }
3948 }
3949
Josef Bacik51561ff2012-07-20 16:25:24 -04003950 /*
3951 * We need to do this to prevent races in people who check if the eb is
3952 * under IO since we can end up having no IO bits set for a short period
3953 * of time.
3954 */
3955 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003956 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3957 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04003958 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003959 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
Nikolay Borisov104b4e52017-06-20 21:01:20 +03003960 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3961 -eb->len,
3962 fs_info->dirty_metadata_batch);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003963 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04003964 } else {
3965 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003966 }
3967
3968 btrfs_tree_unlock(eb);
3969
Qu Wenruof3156df2021-04-06 08:36:02 +08003970 /*
3971 * Either we don't need to submit any tree block, or we're submitting
3972 * subpage eb.
3973 * Subpage metadata doesn't use page locking at all, so we can skip
3974 * the page locking.
3975 */
3976 if (!ret || fs_info->sectorsize < PAGE_SIZE)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003977 return ret;
3978
David Sterba65ad0102018-06-29 10:56:49 +02003979 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003980 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003981 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003982
3983 if (!trylock_page(p)) {
3984 if (!flush) {
Filipe Manana18dfa712019-09-11 17:42:00 +01003985 int err;
3986
3987 err = flush_write_bio(epd);
3988 if (err < 0) {
3989 ret = err;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08003990 failed_page_nr = i;
3991 goto err_unlock;
3992 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003993 flush = 1;
3994 }
3995 lock_page(p);
3996 }
3997 }
3998
3999 return ret;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004000err_unlock:
4001 /* Unlock already locked pages */
4002 for (i = 0; i < failed_page_nr; i++)
4003 unlock_page(eb->pages[i]);
Filipe Manana18dfa712019-09-11 17:42:00 +01004004 /*
4005 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
4006 * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
4007 * be made and undo everything done before.
4008 */
4009 btrfs_tree_lock(eb);
4010 spin_lock(&eb->refs_lock);
4011 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4012 end_extent_buffer_writeback(eb);
4013 spin_unlock(&eb->refs_lock);
4014 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
4015 fs_info->dirty_metadata_batch);
4016 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4017 btrfs_tree_unlock(eb);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004018 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004019}
4020
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004021static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
Filipe Manana656f30d2014-09-26 12:25:56 +01004022{
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004023 struct btrfs_fs_info *fs_info = eb->fs_info;
Filipe Manana656f30d2014-09-26 12:25:56 +01004024
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004025 btrfs_page_set_error(fs_info, page, eb->start, eb->len);
Filipe Manana656f30d2014-09-26 12:25:56 +01004026 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4027 return;
4028
4029 /*
Dennis Zhoueb5b64f2019-09-13 14:54:07 +01004030 * If we error out, we should add back the dirty_metadata_bytes
4031 * to make it consistent.
4032 */
Dennis Zhoueb5b64f2019-09-13 14:54:07 +01004033 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4034 eb->len, fs_info->dirty_metadata_batch);
4035
4036 /*
Filipe Manana656f30d2014-09-26 12:25:56 +01004037 * If writeback for a btree extent that doesn't belong to a log tree
4038 * failed, increment the counter transaction->eb_write_errors.
4039 * We do this because while the transaction is running and before it's
4040 * committing (when we call filemap_fdata[write|wait]_range against
4041 * the btree inode), we might have
4042 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
4043 * returns an error or an error happens during writeback, when we're
4044 * committing the transaction we wouldn't know about it, since the pages
4045 * can be no longer dirty nor marked anymore for writeback (if a
4046 * subsequent modification to the extent buffer didn't happen before the
4047 * transaction commit), which makes filemap_fdata[write|wait]_range not
4048 * able to find the pages tagged with SetPageError at transaction
4049 * commit time. So if this happens we must abort the transaction,
4050 * otherwise we commit a super block with btree roots that point to
4051 * btree nodes/leafs whose content on disk is invalid - either garbage
4052 * or the content of some node/leaf from a past generation that got
4053 * cowed or deleted and is no longer valid.
4054 *
4055 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
4056 * not be enough - we need to distinguish between log tree extents vs
4057 * non-log tree extents, and the next filemap_fdatawait_range() call
4058 * will catch and clear such errors in the mapping - and that call might
4059 * be from a log sync and not from a transaction commit. Also, checking
4060 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
4061 * not done and would not be reliable - the eb might have been released
4062 * from memory and reading it back again means that flag would not be
4063 * set (since it's a runtime flag, not persisted on disk).
4064 *
4065 * Using the flags below in the btree inode also makes us achieve the
4066 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
4067 * writeback for all dirty pages and before filemap_fdatawait_range()
4068 * is called, the writeback for all dirty pages had already finished
4069 * with errors - because we were not using AS_EIO/AS_ENOSPC,
4070 * filemap_fdatawait_range() would return success, as it could not know
4071 * that writeback errors happened (the pages were no longer tagged for
4072 * writeback).
4073 */
4074 switch (eb->log_index) {
4075 case -1:
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004076 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01004077 break;
4078 case 0:
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004079 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01004080 break;
4081 case 1:
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004082 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01004083 break;
4084 default:
4085 BUG(); /* unexpected, logic error */
4086 }
4087}
4088
Qu Wenruo2f3186d2021-04-06 08:36:00 +08004089/*
4090 * The endio specific version which won't touch any unsafe spinlock in endio
4091 * context.
4092 */
4093static struct extent_buffer *find_extent_buffer_nolock(
4094 struct btrfs_fs_info *fs_info, u64 start)
4095{
4096 struct extent_buffer *eb;
4097
4098 rcu_read_lock();
4099 eb = radix_tree_lookup(&fs_info->buffer_radix,
4100 start >> fs_info->sectorsize_bits);
4101 if (eb && atomic_inc_not_zero(&eb->refs)) {
4102 rcu_read_unlock();
4103 return eb;
4104 }
4105 rcu_read_unlock();
4106 return NULL;
4107}
4108
4109/*
4110 * The endio function for subpage extent buffer write.
4111 *
4112 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
4113 * after all extent buffers in the page has finished their writeback.
4114 */
4115static void end_bio_subpage_eb_writepage(struct btrfs_fs_info *fs_info,
4116 struct bio *bio)
4117{
4118 struct bio_vec *bvec;
4119 struct bvec_iter_all iter_all;
4120
4121 ASSERT(!bio_flagged(bio, BIO_CLONED));
4122 bio_for_each_segment_all(bvec, bio, iter_all) {
4123 struct page *page = bvec->bv_page;
4124 u64 bvec_start = page_offset(page) + bvec->bv_offset;
4125 u64 bvec_end = bvec_start + bvec->bv_len - 1;
4126 u64 cur_bytenr = bvec_start;
4127
4128 ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
4129
4130 /* Iterate through all extent buffers in the range */
4131 while (cur_bytenr <= bvec_end) {
4132 struct extent_buffer *eb;
4133 int done;
4134
4135 /*
4136 * Here we can't use find_extent_buffer(), as it may
4137 * try to lock eb->refs_lock, which is not safe in endio
4138 * context.
4139 */
4140 eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
4141 ASSERT(eb);
4142
4143 cur_bytenr = eb->start + eb->len;
4144
4145 ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
4146 done = atomic_dec_and_test(&eb->io_pages);
4147 ASSERT(done);
4148
4149 if (bio->bi_status ||
4150 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4151 ClearPageUptodate(page);
4152 set_btree_ioerr(page, eb);
4153 }
4154
4155 btrfs_subpage_clear_writeback(fs_info, page, eb->start,
4156 eb->len);
4157 end_extent_buffer_writeback(eb);
4158 /*
4159 * free_extent_buffer() will grab spinlock which is not
4160 * safe in endio context. Thus here we manually dec
4161 * the ref.
4162 */
4163 atomic_dec(&eb->refs);
4164 }
4165 }
4166 bio_put(bio);
4167}
4168
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02004169static void end_bio_extent_buffer_writepage(struct bio *bio)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004170{
Qu Wenruo2f3186d2021-04-06 08:36:00 +08004171 struct btrfs_fs_info *fs_info;
Kent Overstreet2c30c712013-11-07 12:20:26 -08004172 struct bio_vec *bvec;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004173 struct extent_buffer *eb;
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02004174 int done;
Ming Lei6dc4f102019-02-15 19:13:19 +08004175 struct bvec_iter_all iter_all;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004176
Qu Wenruo2f3186d2021-04-06 08:36:00 +08004177 fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4178 if (fs_info->sectorsize < PAGE_SIZE)
4179 return end_bio_subpage_eb_writepage(fs_info, bio);
4180
David Sterbac09abff2017-07-13 18:10:07 +02004181 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02004182 bio_for_each_segment_all(bvec, bio, iter_all) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004183 struct page *page = bvec->bv_page;
4184
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004185 eb = (struct extent_buffer *)page->private;
4186 BUG_ON(!eb);
4187 done = atomic_dec_and_test(&eb->io_pages);
4188
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02004189 if (bio->bi_status ||
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02004190 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004191 ClearPageUptodate(page);
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004192 set_btree_ioerr(page, eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004193 }
4194
4195 end_page_writeback(page);
4196
4197 if (!done)
4198 continue;
4199
4200 end_extent_buffer_writeback(eb);
Kent Overstreet2c30c712013-11-07 12:20:26 -08004201 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004202
4203 bio_put(bio);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004204}
4205
Qu Wenruo35b6ddf2021-04-06 08:36:01 +08004206/*
4207 * Unlike the work in write_one_eb(), we rely completely on extent locking.
4208 * Page locking is only utilized at minimum to keep the VMM code happy.
4209 *
4210 * Caller should still call write_one_eb() other than this function directly.
4211 * As write_one_eb() has extra preparation before submitting the extent buffer.
4212 */
4213static int write_one_subpage_eb(struct extent_buffer *eb,
4214 struct writeback_control *wbc,
4215 struct extent_page_data *epd)
4216{
4217 struct btrfs_fs_info *fs_info = eb->fs_info;
4218 struct page *page = eb->pages[0];
4219 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4220 bool no_dirty_ebs = false;
4221 int ret;
4222
4223 /* clear_page_dirty_for_io() in subpage helper needs page locked */
4224 lock_page(page);
4225 btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
4226
4227 /* Check if this is the last dirty bit to update nr_written */
4228 no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
4229 eb->start, eb->len);
4230 if (no_dirty_ebs)
4231 clear_page_dirty_for_io(page);
4232
4233 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc, page,
4234 eb->start, eb->len, eb->start - page_offset(page),
4235 &epd->bio, end_bio_extent_buffer_writepage, 0, 0, 0,
4236 false);
4237 if (ret) {
4238 btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
4239 set_btree_ioerr(page, eb);
4240 unlock_page(page);
4241
4242 if (atomic_dec_and_test(&eb->io_pages))
4243 end_extent_buffer_writeback(eb);
4244 return -EIO;
4245 }
4246 unlock_page(page);
4247 /*
4248 * Submission finished without problem, if no range of the page is
4249 * dirty anymore, we have submitted a page. Update nr_written in wbc.
4250 */
4251 if (no_dirty_ebs)
4252 update_nr_written(wbc, 1);
4253 return ret;
4254}
4255
Chris Mason0e378df2014-05-19 20:55:27 -07004256static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004257 struct writeback_control *wbc,
4258 struct extent_page_data *epd)
4259{
Qu Wenruo0c64c332021-01-06 09:01:40 +08004260 u64 disk_bytenr = eb->start;
Liu Bo851cd172016-09-23 13:44:44 -07004261 u32 nritems;
David Sterbacc5e31a2018-03-01 18:20:27 +01004262 int i, num_pages;
Liu Bo851cd172016-09-23 13:44:44 -07004263 unsigned long start, end;
Liu Boff40adf2017-08-24 18:19:48 -06004264 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04004265 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004266
Filipe Manana656f30d2014-09-26 12:25:56 +01004267 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02004268 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004269 atomic_set(&eb->io_pages, num_pages);
Josef Bacikde0022b2012-09-25 14:25:58 -04004270
Liu Bo851cd172016-09-23 13:44:44 -07004271 /* set btree blocks beyond nritems with 0 to avoid stale content. */
4272 nritems = btrfs_header_nritems(eb);
Liu Bo3eb548e2016-09-14 17:22:57 -07004273 if (btrfs_header_level(eb) > 0) {
Liu Bo3eb548e2016-09-14 17:22:57 -07004274 end = btrfs_node_key_ptr_offset(nritems);
4275
David Sterbab159fa22016-11-08 18:09:03 +01004276 memzero_extent_buffer(eb, end, eb->len - end);
Liu Bo851cd172016-09-23 13:44:44 -07004277 } else {
4278 /*
4279 * leaf:
4280 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
4281 */
4282 start = btrfs_item_nr_offset(nritems);
David Sterba8f881e82019-03-20 11:33:10 +01004283 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
David Sterbab159fa22016-11-08 18:09:03 +01004284 memzero_extent_buffer(eb, start, end - start);
Liu Bo3eb548e2016-09-14 17:22:57 -07004285 }
4286
Qu Wenruo35b6ddf2021-04-06 08:36:01 +08004287 if (eb->fs_info->sectorsize < PAGE_SIZE)
4288 return write_one_subpage_eb(eb, wbc, epd);
4289
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004290 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02004291 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004292
4293 clear_page_dirty_for_io(p);
4294 set_page_writeback(p);
David Sterba0ceb34b2020-02-05 19:09:28 +01004295 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
Qu Wenruo0c64c332021-01-06 09:01:40 +08004296 p, disk_bytenr, PAGE_SIZE, 0,
David Sterbac2df8bb2017-02-10 19:29:38 +01004297 &epd->bio,
Mike Christie1f7ad752016-06-05 14:31:51 -05004298 end_bio_extent_buffer_writepage,
Liu Bo18fdc672017-09-13 12:18:22 -06004299 0, 0, 0, false);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004300 if (ret) {
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004301 set_btree_ioerr(p, eb);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09004302 if (PageWriteback(p))
4303 end_page_writeback(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004304 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
4305 end_extent_buffer_writeback(eb);
4306 ret = -EIO;
4307 break;
4308 }
Qu Wenruo0c64c332021-01-06 09:01:40 +08004309 disk_bytenr += PAGE_SIZE;
David Sterba3d4b9492017-02-10 19:33:41 +01004310 update_nr_written(wbc, 1);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004311 unlock_page(p);
4312 }
4313
4314 if (unlikely(ret)) {
4315 for (; i < num_pages; i++) {
Chris Masonbbf65cf2014-10-04 09:56:45 -07004316 struct page *p = eb->pages[i];
Liu Bo81465022014-09-23 22:22:33 +08004317 clear_page_dirty_for_io(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004318 unlock_page(p);
4319 }
4320 }
4321
4322 return ret;
4323}
4324
Qu Wenruof91e0d02020-12-02 14:48:00 +08004325/*
Qu Wenruoc4aec292021-04-06 08:36:03 +08004326 * Submit one subpage btree page.
4327 *
4328 * The main difference to submit_eb_page() is:
4329 * - Page locking
4330 * For subpage, we don't rely on page locking at all.
4331 *
4332 * - Flush write bio
4333 * We only flush bio if we may be unable to fit current extent buffers into
4334 * current bio.
4335 *
4336 * Return >=0 for the number of submitted extent buffers.
4337 * Return <0 for fatal error.
4338 */
4339static int submit_eb_subpage(struct page *page,
4340 struct writeback_control *wbc,
4341 struct extent_page_data *epd)
4342{
4343 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4344 int submitted = 0;
4345 u64 page_start = page_offset(page);
4346 int bit_start = 0;
4347 const int nbits = BTRFS_SUBPAGE_BITMAP_SIZE;
4348 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
4349 int ret;
4350
4351 /* Lock and write each dirty extent buffers in the range */
4352 while (bit_start < nbits) {
4353 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
4354 struct extent_buffer *eb;
4355 unsigned long flags;
4356 u64 start;
4357
4358 /*
4359 * Take private lock to ensure the subpage won't be detached
4360 * in the meantime.
4361 */
4362 spin_lock(&page->mapping->private_lock);
4363 if (!PagePrivate(page)) {
4364 spin_unlock(&page->mapping->private_lock);
4365 break;
4366 }
4367 spin_lock_irqsave(&subpage->lock, flags);
4368 if (!((1 << bit_start) & subpage->dirty_bitmap)) {
4369 spin_unlock_irqrestore(&subpage->lock, flags);
4370 spin_unlock(&page->mapping->private_lock);
4371 bit_start++;
4372 continue;
4373 }
4374
4375 start = page_start + bit_start * fs_info->sectorsize;
4376 bit_start += sectors_per_node;
4377
4378 /*
4379 * Here we just want to grab the eb without touching extra
4380 * spin locks, so call find_extent_buffer_nolock().
4381 */
4382 eb = find_extent_buffer_nolock(fs_info, start);
4383 spin_unlock_irqrestore(&subpage->lock, flags);
4384 spin_unlock(&page->mapping->private_lock);
4385
4386 /*
4387 * The eb has already reached 0 refs thus find_extent_buffer()
4388 * doesn't return it. We don't need to write back such eb
4389 * anyway.
4390 */
4391 if (!eb)
4392 continue;
4393
4394 ret = lock_extent_buffer_for_io(eb, epd);
4395 if (ret == 0) {
4396 free_extent_buffer(eb);
4397 continue;
4398 }
4399 if (ret < 0) {
4400 free_extent_buffer(eb);
4401 goto cleanup;
4402 }
4403 ret = write_one_eb(eb, wbc, epd);
4404 free_extent_buffer(eb);
4405 if (ret < 0)
4406 goto cleanup;
4407 submitted++;
4408 }
4409 return submitted;
4410
4411cleanup:
4412 /* We hit error, end bio for the submitted extent buffers */
4413 end_write_bio(epd, ret);
4414 return ret;
4415}
4416
4417/*
Qu Wenruof91e0d02020-12-02 14:48:00 +08004418 * Submit all page(s) of one extent buffer.
4419 *
4420 * @page: the page of one extent buffer
4421 * @eb_context: to determine if we need to submit this page, if current page
4422 * belongs to this eb, we don't need to submit
4423 *
4424 * The caller should pass each page in their bytenr order, and here we use
4425 * @eb_context to determine if we have submitted pages of one extent buffer.
4426 *
4427 * If we have, we just skip until we hit a new page that doesn't belong to
4428 * current @eb_context.
4429 *
4430 * If not, we submit all the page(s) of the extent buffer.
4431 *
4432 * Return >0 if we have submitted the extent buffer successfully.
4433 * Return 0 if we don't need to submit the page, as it's already submitted by
4434 * previous call.
4435 * Return <0 for fatal error.
4436 */
4437static int submit_eb_page(struct page *page, struct writeback_control *wbc,
4438 struct extent_page_data *epd,
4439 struct extent_buffer **eb_context)
4440{
4441 struct address_space *mapping = page->mapping;
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004442 struct btrfs_block_group *cache = NULL;
Qu Wenruof91e0d02020-12-02 14:48:00 +08004443 struct extent_buffer *eb;
4444 int ret;
4445
4446 if (!PagePrivate(page))
4447 return 0;
4448
Qu Wenruoc4aec292021-04-06 08:36:03 +08004449 if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
4450 return submit_eb_subpage(page, wbc, epd);
4451
Qu Wenruof91e0d02020-12-02 14:48:00 +08004452 spin_lock(&mapping->private_lock);
4453 if (!PagePrivate(page)) {
4454 spin_unlock(&mapping->private_lock);
4455 return 0;
4456 }
4457
4458 eb = (struct extent_buffer *)page->private;
4459
4460 /*
4461 * Shouldn't happen and normally this would be a BUG_ON but no point
4462 * crashing the machine for something we can survive anyway.
4463 */
4464 if (WARN_ON(!eb)) {
4465 spin_unlock(&mapping->private_lock);
4466 return 0;
4467 }
4468
4469 if (eb == *eb_context) {
4470 spin_unlock(&mapping->private_lock);
4471 return 0;
4472 }
4473 ret = atomic_inc_not_zero(&eb->refs);
4474 spin_unlock(&mapping->private_lock);
4475 if (!ret)
4476 return 0;
4477
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004478 if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
4479 /*
4480 * If for_sync, this hole will be filled with
4481 * trasnsaction commit.
4482 */
4483 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4484 ret = -EAGAIN;
4485 else
4486 ret = 0;
4487 free_extent_buffer(eb);
4488 return ret;
4489 }
4490
Qu Wenruof91e0d02020-12-02 14:48:00 +08004491 *eb_context = eb;
4492
4493 ret = lock_extent_buffer_for_io(eb, epd);
4494 if (ret <= 0) {
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004495 btrfs_revert_meta_write_pointer(cache, eb);
4496 if (cache)
4497 btrfs_put_block_group(cache);
Qu Wenruof91e0d02020-12-02 14:48:00 +08004498 free_extent_buffer(eb);
4499 return ret;
4500 }
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004501 if (cache)
4502 btrfs_put_block_group(cache);
Qu Wenruof91e0d02020-12-02 14:48:00 +08004503 ret = write_one_eb(eb, wbc, epd);
4504 free_extent_buffer(eb);
4505 if (ret < 0)
4506 return ret;
4507 return 1;
4508}
4509
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004510int btree_write_cache_pages(struct address_space *mapping,
4511 struct writeback_control *wbc)
4512{
Qu Wenruof91e0d02020-12-02 14:48:00 +08004513 struct extent_buffer *eb_context = NULL;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004514 struct extent_page_data epd = {
4515 .bio = NULL,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004516 .extent_locked = 0,
4517 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4518 };
Qu Wenruob3ff8f12020-02-12 14:12:44 +08004519 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004520 int ret = 0;
4521 int done = 0;
4522 int nr_to_write_done = 0;
4523 struct pagevec pvec;
4524 int nr_pages;
4525 pgoff_t index;
4526 pgoff_t end; /* Inclusive */
4527 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05004528 xa_mark_t tag;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004529
Mel Gorman86679822017-11-15 17:37:52 -08004530 pagevec_init(&pvec);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004531 if (wbc->range_cyclic) {
4532 index = mapping->writeback_index; /* Start from prev offset */
4533 end = -1;
Josef Bacik556755a2020-01-03 10:38:44 -05004534 /*
4535 * Start from the beginning does not need to cycle over the
4536 * range, mark it as scanned.
4537 */
4538 scanned = (index == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004539 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004540 index = wbc->range_start >> PAGE_SHIFT;
4541 end = wbc->range_end >> PAGE_SHIFT;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004542 scanned = 1;
4543 }
4544 if (wbc->sync_mode == WB_SYNC_ALL)
4545 tag = PAGECACHE_TAG_TOWRITE;
4546 else
4547 tag = PAGECACHE_TAG_DIRTY;
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004548 btrfs_zoned_meta_io_lock(fs_info);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004549retry:
4550 if (wbc->sync_mode == WB_SYNC_ALL)
4551 tag_pages_for_writeback(mapping, index, end);
4552 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara4006f432017-11-15 17:34:37 -08004553 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
Jan Kara67fd7072017-11-15 17:35:19 -08004554 tag))) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004555 unsigned i;
4556
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004557 for (i = 0; i < nr_pages; i++) {
4558 struct page *page = pvec.pages[i];
4559
Qu Wenruof91e0d02020-12-02 14:48:00 +08004560 ret = submit_eb_page(page, wbc, &epd, &eb_context);
4561 if (ret == 0)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004562 continue;
Qu Wenruof91e0d02020-12-02 14:48:00 +08004563 if (ret < 0) {
Filipe Manana0607eb1d2019-09-11 17:42:28 +01004564 done = 1;
Filipe Manana0607eb1d2019-09-11 17:42:28 +01004565 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004566 }
4567
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004568 /*
4569 * the filesystem may choose to bump up nr_to_write.
4570 * We have to make sure to honor the new nr_to_write
4571 * at any time
4572 */
4573 nr_to_write_done = wbc->nr_to_write <= 0;
4574 }
4575 pagevec_release(&pvec);
4576 cond_resched();
4577 }
4578 if (!scanned && !done) {
4579 /*
4580 * We hit the last page and there is more work to be done: wrap
4581 * back to the start of the file
4582 */
4583 scanned = 1;
4584 index = 0;
4585 goto retry;
4586 }
Qu Wenruo2b952ee2019-03-20 14:27:43 +08004587 if (ret < 0) {
4588 end_write_bio(&epd, ret);
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004589 goto out;
Qu Wenruo2b952ee2019-03-20 14:27:43 +08004590 }
Qu Wenruob3ff8f12020-02-12 14:12:44 +08004591 /*
4592 * If something went wrong, don't allow any metadata write bio to be
4593 * submitted.
4594 *
4595 * This would prevent use-after-free if we had dirty pages not
4596 * cleaned up, which can still happen by fuzzed images.
4597 *
4598 * - Bad extent tree
4599 * Allowing existing tree block to be allocated for other trees.
4600 *
4601 * - Log tree operations
4602 * Exiting tree blocks get allocated to log tree, bumps its
4603 * generation, then get cleaned in tree re-balance.
4604 * Such tree block will not be written back, since it's clean,
4605 * thus no WRITTEN flag set.
4606 * And after log writes back, this tree block is not traced by
4607 * any dirty extent_io_tree.
4608 *
4609 * - Offending tree block gets re-dirtied from its original owner
4610 * Since it has bumped generation, no WRITTEN flag, it can be
4611 * reused without COWing. This tree block will not be traced
4612 * by btrfs_transaction::dirty_pages.
4613 *
4614 * Now such dirty tree block will not be cleaned by any dirty
4615 * extent io tree. Thus we don't want to submit such wild eb
4616 * if the fs already has error.
4617 */
4618 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
4619 ret = flush_write_bio(&epd);
4620 } else {
Josef Bacikfbabd4a2020-07-21 10:38:37 -04004621 ret = -EROFS;
Qu Wenruob3ff8f12020-02-12 14:12:44 +08004622 end_write_bio(&epd, ret);
4623 }
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004624out:
4625 btrfs_zoned_meta_io_unlock(fs_info);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004626 return ret;
4627}
4628
Chris Masond1310b22008-01-24 16:13:08 -05004629/**
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02004630 * Walk the list of dirty pages of the given address space and write all of them.
4631 *
Chris Masond1310b22008-01-24 16:13:08 -05004632 * @mapping: address space structure to write
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02004633 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
4634 * @epd: holds context for the write, namely the bio
Chris Masond1310b22008-01-24 16:13:08 -05004635 *
4636 * If a page is already under I/O, write_cache_pages() skips it, even
4637 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4638 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4639 * and msync() need to guarantee that all the data which was dirty at the time
4640 * the call was made get new I/O started against them. If wbc->sync_mode is
4641 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4642 * existing IO to complete.
4643 */
David Sterba4242b642017-02-10 19:38:24 +01004644static int extent_write_cache_pages(struct address_space *mapping,
Chris Mason4bef0842008-09-08 11:18:08 -04004645 struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01004646 struct extent_page_data *epd)
Chris Masond1310b22008-01-24 16:13:08 -05004647{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004648 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05004649 int ret = 0;
4650 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004651 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004652 struct pagevec pvec;
4653 int nr_pages;
4654 pgoff_t index;
4655 pgoff_t end; /* Inclusive */
Liu Boa91326672016-03-07 16:56:21 -08004656 pgoff_t done_index;
4657 int range_whole = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004658 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05004659 xa_mark_t tag;
Chris Masond1310b22008-01-24 16:13:08 -05004660
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004661 /*
4662 * We have to hold onto the inode so that ordered extents can do their
4663 * work when the IO finishes. The alternative to this is failing to add
4664 * an ordered extent if the igrab() fails there and that is a huge pain
4665 * to deal with, so instead just hold onto the inode throughout the
4666 * writepages operation. If it fails here we are freeing up the inode
4667 * anyway and we'd rather not waste our time writing out stuff that is
4668 * going to be truncated anyway.
4669 */
4670 if (!igrab(inode))
4671 return 0;
4672
Mel Gorman86679822017-11-15 17:37:52 -08004673 pagevec_init(&pvec);
Chris Masond1310b22008-01-24 16:13:08 -05004674 if (wbc->range_cyclic) {
4675 index = mapping->writeback_index; /* Start from prev offset */
4676 end = -1;
Josef Bacik556755a2020-01-03 10:38:44 -05004677 /*
4678 * Start from the beginning does not need to cycle over the
4679 * range, mark it as scanned.
4680 */
4681 scanned = (index == 0);
Chris Masond1310b22008-01-24 16:13:08 -05004682 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004683 index = wbc->range_start >> PAGE_SHIFT;
4684 end = wbc->range_end >> PAGE_SHIFT;
Liu Boa91326672016-03-07 16:56:21 -08004685 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4686 range_whole = 1;
Chris Masond1310b22008-01-24 16:13:08 -05004687 scanned = 1;
4688 }
Ethan Lien3cd24c62018-11-01 14:49:03 +08004689
4690 /*
4691 * We do the tagged writepage as long as the snapshot flush bit is set
4692 * and we are the first one who do the filemap_flush() on this inode.
4693 *
4694 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4695 * not race in and drop the bit.
4696 */
4697 if (range_whole && wbc->nr_to_write == LONG_MAX &&
4698 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4699 &BTRFS_I(inode)->runtime_flags))
4700 wbc->tagged_writepages = 1;
4701
4702 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004703 tag = PAGECACHE_TAG_TOWRITE;
4704 else
4705 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05004706retry:
Ethan Lien3cd24c62018-11-01 14:49:03 +08004707 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004708 tag_pages_for_writeback(mapping, index, end);
Liu Boa91326672016-03-07 16:56:21 -08004709 done_index = index;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004710 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara67fd7072017-11-15 17:35:19 -08004711 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4712 &index, end, tag))) {
Chris Masond1310b22008-01-24 16:13:08 -05004713 unsigned i;
4714
Chris Masond1310b22008-01-24 16:13:08 -05004715 for (i = 0; i < nr_pages; i++) {
4716 struct page *page = pvec.pages[i];
4717
Tejun Heof7bddf12019-10-03 07:27:13 -07004718 done_index = page->index + 1;
Chris Masond1310b22008-01-24 16:13:08 -05004719 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07004720 * At this point we hold neither the i_pages lock nor
4721 * the page lock: the page may be truncated or
4722 * invalidated (changing page->mapping to NULL),
4723 * or even swizzled back from swapper_space to
4724 * tmpfs file mapping
Chris Masond1310b22008-01-24 16:13:08 -05004725 */
Josef Bacikc8f2f242013-02-11 11:33:00 -05004726 if (!trylock_page(page)) {
Qu Wenruof4340622019-03-20 14:27:41 +08004727 ret = flush_write_bio(epd);
4728 BUG_ON(ret < 0);
Josef Bacikc8f2f242013-02-11 11:33:00 -05004729 lock_page(page);
Chris Mason01d658f2011-11-01 10:08:06 -04004730 }
Chris Masond1310b22008-01-24 16:13:08 -05004731
4732 if (unlikely(page->mapping != mapping)) {
4733 unlock_page(page);
4734 continue;
4735 }
4736
Chris Masond2c3f4f2008-11-19 12:44:22 -05004737 if (wbc->sync_mode != WB_SYNC_NONE) {
Qu Wenruof4340622019-03-20 14:27:41 +08004738 if (PageWriteback(page)) {
4739 ret = flush_write_bio(epd);
4740 BUG_ON(ret < 0);
4741 }
Chris Masond1310b22008-01-24 16:13:08 -05004742 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05004743 }
Chris Masond1310b22008-01-24 16:13:08 -05004744
4745 if (PageWriteback(page) ||
4746 !clear_page_dirty_for_io(page)) {
4747 unlock_page(page);
4748 continue;
4749 }
4750
David Sterbaaab6e9e2017-11-30 18:00:02 +01004751 ret = __extent_writepage(page, wbc, epd);
Liu Boa91326672016-03-07 16:56:21 -08004752 if (ret < 0) {
Liu Boa91326672016-03-07 16:56:21 -08004753 done = 1;
4754 break;
4755 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004756
4757 /*
4758 * the filesystem may choose to bump up nr_to_write.
4759 * We have to make sure to honor the new nr_to_write
4760 * at any time
4761 */
4762 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05004763 }
4764 pagevec_release(&pvec);
4765 cond_resched();
4766 }
Liu Bo894b36e2016-03-07 16:56:22 -08004767 if (!scanned && !done) {
Chris Masond1310b22008-01-24 16:13:08 -05004768 /*
4769 * We hit the last page and there is more work to be done: wrap
4770 * back to the start of the file
4771 */
4772 scanned = 1;
4773 index = 0;
Josef Bacik42ffb0b2020-01-23 15:33:02 -05004774
4775 /*
4776 * If we're looping we could run into a page that is locked by a
4777 * writer and that writer could be waiting on writeback for a
4778 * page in our current bio, and thus deadlock, so flush the
4779 * write bio here.
4780 */
4781 ret = flush_write_bio(epd);
4782 if (!ret)
4783 goto retry;
Chris Masond1310b22008-01-24 16:13:08 -05004784 }
Liu Boa91326672016-03-07 16:56:21 -08004785
4786 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4787 mapping->writeback_index = done_index;
4788
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004789 btrfs_add_delayed_iput(inode);
Liu Bo894b36e2016-03-07 16:56:22 -08004790 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004791}
Chris Masond1310b22008-01-24 16:13:08 -05004792
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +02004793int extent_write_full_page(struct page *page, struct writeback_control *wbc)
Chris Masond1310b22008-01-24 16:13:08 -05004794{
4795 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004796 struct extent_page_data epd = {
4797 .bio = NULL,
Chris Mason771ed682008-11-06 22:02:51 -05004798 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004799 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004800 };
Chris Masond1310b22008-01-24 16:13:08 -05004801
Chris Masond1310b22008-01-24 16:13:08 -05004802 ret = __extent_writepage(page, wbc, &epd);
Qu Wenruo30659762019-03-20 14:27:42 +08004803 ASSERT(ret <= 0);
4804 if (ret < 0) {
4805 end_write_bio(&epd, ret);
4806 return ret;
4807 }
Chris Masond1310b22008-01-24 16:13:08 -05004808
Qu Wenruo30659762019-03-20 14:27:42 +08004809 ret = flush_write_bio(&epd);
4810 ASSERT(ret <= 0);
Chris Masond1310b22008-01-24 16:13:08 -05004811 return ret;
4812}
Chris Masond1310b22008-01-24 16:13:08 -05004813
Nikolay Borisov5e3ee232017-12-08 15:55:58 +02004814int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
Chris Mason771ed682008-11-06 22:02:51 -05004815 int mode)
4816{
4817 int ret = 0;
4818 struct address_space *mapping = inode->i_mapping;
4819 struct page *page;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004820 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4821 PAGE_SHIFT;
Chris Mason771ed682008-11-06 22:02:51 -05004822
4823 struct extent_page_data epd = {
4824 .bio = NULL,
Chris Mason771ed682008-11-06 22:02:51 -05004825 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04004826 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05004827 };
4828 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05004829 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05004830 .nr_to_write = nr_pages * 2,
4831 .range_start = start,
4832 .range_end = end + 1,
Chris Masonec39f762019-07-10 12:28:17 -07004833 /* We're called from an async helper function */
4834 .punt_to_cgroup = 1,
4835 .no_cgroup_owner = 1,
Chris Mason771ed682008-11-06 22:02:51 -05004836 };
4837
Chris Masondbb70be2019-07-10 12:28:18 -07004838 wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
Chris Masond3977122009-01-05 21:25:51 -05004839 while (start <= end) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004840 page = find_get_page(mapping, start >> PAGE_SHIFT);
Chris Mason771ed682008-11-06 22:02:51 -05004841 if (clear_page_dirty_for_io(page))
4842 ret = __extent_writepage(page, &wbc_writepages, &epd);
4843 else {
Nikolay Borisov7087a9d2018-11-01 14:09:48 +02004844 btrfs_writepage_endio_finish_ordered(page, start,
Nikolay Borisovc6297322018-11-08 10:18:08 +02004845 start + PAGE_SIZE - 1, 1);
Chris Mason771ed682008-11-06 22:02:51 -05004846 unlock_page(page);
4847 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004848 put_page(page);
4849 start += PAGE_SIZE;
Chris Mason771ed682008-11-06 22:02:51 -05004850 }
4851
Qu Wenruo02c6db42019-03-20 14:27:45 +08004852 ASSERT(ret <= 0);
Chris Masondbb70be2019-07-10 12:28:18 -07004853 if (ret == 0)
4854 ret = flush_write_bio(&epd);
4855 else
Qu Wenruo02c6db42019-03-20 14:27:45 +08004856 end_write_bio(&epd, ret);
Chris Masondbb70be2019-07-10 12:28:18 -07004857
4858 wbc_detach_inode(&wbc_writepages);
Chris Mason771ed682008-11-06 22:02:51 -05004859 return ret;
4860}
Chris Masond1310b22008-01-24 16:13:08 -05004861
Nikolay Borisov8ae225a2018-04-19 10:46:38 +03004862int extent_writepages(struct address_space *mapping,
Chris Masond1310b22008-01-24 16:13:08 -05004863 struct writeback_control *wbc)
4864{
4865 int ret = 0;
4866 struct extent_page_data epd = {
4867 .bio = NULL,
Chris Mason771ed682008-11-06 22:02:51 -05004868 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004869 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004870 };
4871
David Sterba935db852017-06-23 04:30:28 +02004872 ret = extent_write_cache_pages(mapping, wbc, &epd);
Qu Wenruoa2a72fb2019-03-20 14:27:48 +08004873 ASSERT(ret <= 0);
4874 if (ret < 0) {
4875 end_write_bio(&epd, ret);
4876 return ret;
4877 }
4878 ret = flush_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004879 return ret;
4880}
Chris Masond1310b22008-01-24 16:13:08 -05004881
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07004882void extent_readahead(struct readahead_control *rac)
Chris Masond1310b22008-01-24 16:13:08 -05004883{
4884 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04004885 unsigned long bio_flags = 0;
Liu Bo67c96842012-07-20 21:43:09 -06004886 struct page *pagepool[16];
Miao Xie125bac012013-07-25 19:22:37 +08004887 struct extent_map *em_cached = NULL;
Filipe Manana808f80b2015-09-28 09:56:26 +01004888 u64 prev_em_start = (u64)-1;
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07004889 int nr;
Chris Masond1310b22008-01-24 16:13:08 -05004890
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07004891 while ((nr = readahead_page_batch(rac, pagepool))) {
Matthew Wilcox (Oracle)32c0a6b2021-03-21 21:03:11 +00004892 u64 contig_start = readahead_pos(rac);
4893 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
Chris Masond1310b22008-01-24 16:13:08 -05004894
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07004895 contiguous_readpages(pagepool, nr, contig_start, contig_end,
4896 &em_cached, &bio, &bio_flags, &prev_em_start);
Chris Masond1310b22008-01-24 16:13:08 -05004897 }
Liu Bo67c96842012-07-20 21:43:09 -06004898
Miao Xie125bac012013-07-25 19:22:37 +08004899 if (em_cached)
4900 free_extent_map(em_cached);
4901
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07004902 if (bio) {
4903 if (submit_one_bio(bio, 0, bio_flags))
4904 return;
4905 }
Chris Masond1310b22008-01-24 16:13:08 -05004906}
Chris Masond1310b22008-01-24 16:13:08 -05004907
4908/*
4909 * basic invalidatepage code, this waits on any locked or writeback
4910 * ranges corresponding to the page, and then deletes any extent state
4911 * records from the tree
4912 */
4913int extent_invalidatepage(struct extent_io_tree *tree,
4914 struct page *page, unsigned long offset)
4915{
Josef Bacik2ac55d42010-02-03 19:33:23 +00004916 struct extent_state *cached_state = NULL;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004917 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004918 u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05004919 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4920
Qu Wenruo829ddec2020-11-13 20:51:39 +08004921 /* This function is only called for the btree inode */
4922 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
4923
Qu Wenruofda28322013-02-26 08:10:22 +00004924 start += ALIGN(offset, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05004925 if (start > end)
4926 return 0;
4927
David Sterbaff13db42015-12-03 14:30:40 +01004928 lock_extent_bits(tree, start, end, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04004929 wait_on_page_writeback(page);
Qu Wenruo829ddec2020-11-13 20:51:39 +08004930
4931 /*
4932 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
4933 * so here we only need to unlock the extent range to free any
4934 * existing extent state.
4935 */
4936 unlock_extent_cached(tree, start, end, &cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05004937 return 0;
4938}
Chris Masond1310b22008-01-24 16:13:08 -05004939
4940/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04004941 * a helper for releasepage, this tests for areas of the page that
4942 * are locked or under IO and drops the related state bits if it is safe
4943 * to drop the page.
4944 */
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03004945static int try_release_extent_state(struct extent_io_tree *tree,
Eric Sandeen48a3b632013-04-25 20:41:01 +00004946 struct page *page, gfp_t mask)
Chris Mason7b13b7b2008-04-18 10:29:50 -04004947{
Miao Xie4eee4fa2012-12-21 09:17:45 +00004948 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004949 u64 end = start + PAGE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004950 int ret = 1;
4951
Nikolay Borisov88826792019-03-14 15:28:31 +02004952 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
Chris Mason7b13b7b2008-04-18 10:29:50 -04004953 ret = 0;
Nikolay Borisov88826792019-03-14 15:28:31 +02004954 } else {
Chris Mason11ef1602009-09-23 20:28:46 -04004955 /*
Filipe Manana2766ff62020-11-04 11:07:34 +00004956 * At this point we can safely clear everything except the
4957 * locked bit, the nodatasum bit and the delalloc new bit.
4958 * The delalloc new bit will be cleared by ordered extent
4959 * completion.
Chris Mason11ef1602009-09-23 20:28:46 -04004960 */
David Sterba66b0c882017-10-31 16:30:47 +01004961 ret = __clear_extent_bit(tree, start, end,
Filipe Manana2766ff62020-11-04 11:07:34 +00004962 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
4963 0, 0, NULL, mask, NULL);
Chris Masone3f24cc2011-02-14 12:52:08 -05004964
4965 /* if clear_extent_bit failed for enomem reasons,
4966 * we can't allow the release to continue.
4967 */
4968 if (ret < 0)
4969 ret = 0;
4970 else
4971 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004972 }
4973 return ret;
4974}
Chris Mason7b13b7b2008-04-18 10:29:50 -04004975
4976/*
Chris Masond1310b22008-01-24 16:13:08 -05004977 * a helper for releasepage. As long as there are no locked extents
4978 * in the range corresponding to the page, both state records and extent
4979 * map records are removed
4980 */
Nikolay Borisov477a30b2018-04-19 10:46:34 +03004981int try_release_extent_mapping(struct page *page, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05004982{
4983 struct extent_map *em;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004984 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004985 u64 end = start + PAGE_SIZE - 1;
Filipe Mananabd3599a2018-07-12 01:36:43 +01004986 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4987 struct extent_io_tree *tree = &btrfs_inode->io_tree;
4988 struct extent_map_tree *map = &btrfs_inode->extent_tree;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004989
Mel Gormand0164ad2015-11-06 16:28:21 -08004990 if (gfpflags_allow_blocking(mask) &&
Byongho Leeee221842015-12-15 01:42:10 +09004991 page->mapping->host->i_size > SZ_16M) {
Yan39b56372008-02-15 10:40:50 -05004992 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05004993 while (start <= end) {
Filipe Mananafbc2bd72020-07-22 12:28:52 +01004994 struct btrfs_fs_info *fs_info;
4995 u64 cur_gen;
4996
Yan39b56372008-02-15 10:40:50 -05004997 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04004998 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05004999 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09005000 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04005001 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05005002 break;
5003 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04005004 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
5005 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04005006 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05005007 free_extent_map(em);
5008 break;
5009 }
Filipe Manana3d6448e2020-07-22 12:28:37 +01005010 if (test_range_bit(tree, em->start,
5011 extent_map_end(em) - 1,
5012 EXTENT_LOCKED, 0, NULL))
5013 goto next;
5014 /*
5015 * If it's not in the list of modified extents, used
5016 * by a fast fsync, we can remove it. If it's being
5017 * logged we can safely remove it since fsync took an
5018 * extra reference on the em.
5019 */
5020 if (list_empty(&em->list) ||
Filipe Mananafbc2bd72020-07-22 12:28:52 +01005021 test_bit(EXTENT_FLAG_LOGGING, &em->flags))
5022 goto remove_em;
5023 /*
5024 * If it's in the list of modified extents, remove it
5025 * only if its generation is older then the current one,
5026 * in which case we don't need it for a fast fsync.
5027 * Otherwise don't remove it, we could be racing with an
5028 * ongoing fast fsync that could miss the new extent.
5029 */
5030 fs_info = btrfs_inode->root->fs_info;
5031 spin_lock(&fs_info->trans_lock);
5032 cur_gen = fs_info->generation;
5033 spin_unlock(&fs_info->trans_lock);
5034 if (em->generation >= cur_gen)
5035 goto next;
5036remove_em:
Filipe Manana5e548b32020-07-22 12:29:01 +01005037 /*
5038 * We only remove extent maps that are not in the list of
5039 * modified extents or that are in the list but with a
5040 * generation lower then the current generation, so there
5041 * is no need to set the full fsync flag on the inode (it
5042 * hurts the fsync performance for workloads with a data
5043 * size that exceeds or is close to the system's memory).
5044 */
Filipe Mananafbc2bd72020-07-22 12:28:52 +01005045 remove_extent_mapping(map, em);
5046 /* once for the rb tree */
5047 free_extent_map(em);
Filipe Manana3d6448e2020-07-22 12:28:37 +01005048next:
Chris Mason70dec802008-01-29 09:59:12 -05005049 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04005050 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05005051
5052 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05005053 free_extent_map(em);
Paul E. McKenney9f47eb52020-05-08 14:15:37 -07005054
5055 cond_resched(); /* Allow large-extent preemption. */
Chris Masond1310b22008-01-24 16:13:08 -05005056 }
Chris Masond1310b22008-01-24 16:13:08 -05005057 }
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03005058 return try_release_extent_state(tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05005059}
Chris Masond1310b22008-01-24 16:13:08 -05005060
Chris Masonec29ed52011-02-23 16:23:20 -05005061/*
5062 * helper function for fiemap, which doesn't want to see any holes.
5063 * This maps until we find something past 'last'
5064 */
Nikolay Borisovf1bbde82020-08-31 14:42:45 +03005065static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
David Sterbae3350e12017-06-23 04:09:57 +02005066 u64 offset, u64 last)
Chris Masonec29ed52011-02-23 16:23:20 -05005067{
Nikolay Borisovf1bbde82020-08-31 14:42:45 +03005068 u64 sectorsize = btrfs_inode_sectorsize(inode);
Chris Masonec29ed52011-02-23 16:23:20 -05005069 struct extent_map *em;
5070 u64 len;
5071
5072 if (offset >= last)
5073 return NULL;
5074
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05305075 while (1) {
Chris Masonec29ed52011-02-23 16:23:20 -05005076 len = last - offset;
5077 if (len == 0)
5078 break;
Qu Wenruofda28322013-02-26 08:10:22 +00005079 len = ALIGN(len, sectorsize);
Nikolay Borisovf1bbde82020-08-31 14:42:45 +03005080 em = btrfs_get_extent_fiemap(inode, offset, len);
David Sterbac7040052011-04-19 18:00:01 +02005081 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05005082 return em;
5083
5084 /* if this isn't a hole return it */
Nikolay Borisov4a2d25c2017-11-23 10:51:43 +02005085 if (em->block_start != EXTENT_MAP_HOLE)
Chris Masonec29ed52011-02-23 16:23:20 -05005086 return em;
Chris Masonec29ed52011-02-23 16:23:20 -05005087
5088 /* this is a hole, advance to the next extent */
5089 offset = extent_map_end(em);
5090 free_extent_map(em);
5091 if (offset >= last)
5092 break;
5093 }
5094 return NULL;
5095}
5096
Qu Wenruo47518322017-04-07 10:43:15 +08005097/*
5098 * To cache previous fiemap extent
5099 *
5100 * Will be used for merging fiemap extent
5101 */
5102struct fiemap_cache {
5103 u64 offset;
5104 u64 phys;
5105 u64 len;
5106 u32 flags;
5107 bool cached;
5108};
5109
5110/*
5111 * Helper to submit fiemap extent.
5112 *
5113 * Will try to merge current fiemap extent specified by @offset, @phys,
5114 * @len and @flags with cached one.
5115 * And only when we fails to merge, cached one will be submitted as
5116 * fiemap extent.
5117 *
5118 * Return value is the same as fiemap_fill_next_extent().
5119 */
5120static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
5121 struct fiemap_cache *cache,
5122 u64 offset, u64 phys, u64 len, u32 flags)
5123{
5124 int ret = 0;
5125
5126 if (!cache->cached)
5127 goto assign;
5128
5129 /*
5130 * Sanity check, extent_fiemap() should have ensured that new
Andrea Gelmini52042d82018-11-28 12:05:13 +01005131 * fiemap extent won't overlap with cached one.
Qu Wenruo47518322017-04-07 10:43:15 +08005132 * Not recoverable.
5133 *
5134 * NOTE: Physical address can overlap, due to compression
5135 */
5136 if (cache->offset + cache->len > offset) {
5137 WARN_ON(1);
5138 return -EINVAL;
5139 }
5140
5141 /*
5142 * Only merges fiemap extents if
5143 * 1) Their logical addresses are continuous
5144 *
5145 * 2) Their physical addresses are continuous
5146 * So truly compressed (physical size smaller than logical size)
5147 * extents won't get merged with each other
5148 *
5149 * 3) Share same flags except FIEMAP_EXTENT_LAST
5150 * So regular extent won't get merged with prealloc extent
5151 */
5152 if (cache->offset + cache->len == offset &&
5153 cache->phys + cache->len == phys &&
5154 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
5155 (flags & ~FIEMAP_EXTENT_LAST)) {
5156 cache->len += len;
5157 cache->flags |= flags;
5158 goto try_submit_last;
5159 }
5160
5161 /* Not mergeable, need to submit cached one */
5162 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5163 cache->len, cache->flags);
5164 cache->cached = false;
5165 if (ret)
5166 return ret;
5167assign:
5168 cache->cached = true;
5169 cache->offset = offset;
5170 cache->phys = phys;
5171 cache->len = len;
5172 cache->flags = flags;
5173try_submit_last:
5174 if (cache->flags & FIEMAP_EXTENT_LAST) {
5175 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
5176 cache->phys, cache->len, cache->flags);
5177 cache->cached = false;
5178 }
5179 return ret;
5180}
5181
5182/*
Qu Wenruo848c23b2017-06-22 10:01:21 +08005183 * Emit last fiemap cache
Qu Wenruo47518322017-04-07 10:43:15 +08005184 *
Qu Wenruo848c23b2017-06-22 10:01:21 +08005185 * The last fiemap cache may still be cached in the following case:
5186 * 0 4k 8k
5187 * |<- Fiemap range ->|
5188 * |<------------ First extent ----------->|
5189 *
5190 * In this case, the first extent range will be cached but not emitted.
5191 * So we must emit it before ending extent_fiemap().
Qu Wenruo47518322017-04-07 10:43:15 +08005192 */
David Sterba5c5aff92019-03-20 11:29:46 +01005193static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
Qu Wenruo848c23b2017-06-22 10:01:21 +08005194 struct fiemap_cache *cache)
Qu Wenruo47518322017-04-07 10:43:15 +08005195{
5196 int ret;
5197
5198 if (!cache->cached)
5199 return 0;
5200
Qu Wenruo47518322017-04-07 10:43:15 +08005201 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5202 cache->len, cache->flags);
5203 cache->cached = false;
5204 if (ret > 0)
5205 ret = 0;
5206 return ret;
5207}
5208
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005209int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
David Sterbabab16e22020-06-23 20:56:12 +02005210 u64 start, u64 len)
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005211{
Josef Bacik975f84f2010-11-23 19:36:57 +00005212 int ret = 0;
Boris Burkov15c77452021-04-06 15:31:18 -07005213 u64 off;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005214 u64 max = start + len;
5215 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00005216 u32 found_type;
5217 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05005218 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005219 u64 disko = 0;
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005220 u64 isize = i_size_read(&inode->vfs_inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00005221 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005222 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00005223 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00005224 struct btrfs_path *path;
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005225 struct btrfs_root *root = inode->root;
Qu Wenruo47518322017-04-07 10:43:15 +08005226 struct fiemap_cache cache = { 0 };
David Sterba5911c8f2019-05-15 15:31:04 +02005227 struct ulist *roots;
5228 struct ulist *tmp_ulist;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005229 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05005230 u64 em_start = 0;
5231 u64 em_len = 0;
5232 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005233
5234 if (len == 0)
5235 return -EINVAL;
5236
Josef Bacik975f84f2010-11-23 19:36:57 +00005237 path = btrfs_alloc_path();
5238 if (!path)
5239 return -ENOMEM;
Josef Bacik975f84f2010-11-23 19:36:57 +00005240
David Sterba5911c8f2019-05-15 15:31:04 +02005241 roots = ulist_alloc(GFP_KERNEL);
5242 tmp_ulist = ulist_alloc(GFP_KERNEL);
5243 if (!roots || !tmp_ulist) {
5244 ret = -ENOMEM;
5245 goto out_free_ulist;
5246 }
5247
Boris Burkov15c77452021-04-06 15:31:18 -07005248 /*
5249 * We can't initialize that to 'start' as this could miss extents due
5250 * to extent item merging
5251 */
5252 off = 0;
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005253 start = round_down(start, btrfs_inode_sectorsize(inode));
5254 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
Josef Bacik4d479cf2011-11-17 11:34:31 -05005255
Chris Masonec29ed52011-02-23 16:23:20 -05005256 /*
5257 * lookup the last file extent. We're not using i_size here
5258 * because there might be preallocation past i_size
5259 */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005260 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
5261 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00005262 if (ret < 0) {
David Sterba5911c8f2019-05-15 15:31:04 +02005263 goto out_free_ulist;
Liu Bo2d324f52016-05-17 17:21:48 -07005264 } else {
5265 WARN_ON(!ret);
5266 if (ret == 1)
5267 ret = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00005268 }
Liu Bo2d324f52016-05-17 17:21:48 -07005269
Josef Bacik975f84f2010-11-23 19:36:57 +00005270 path->slots[0]--;
Josef Bacik975f84f2010-11-23 19:36:57 +00005271 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +02005272 found_type = found_key.type;
Josef Bacik975f84f2010-11-23 19:36:57 +00005273
Chris Masonec29ed52011-02-23 16:23:20 -05005274 /* No extents, but there might be delalloc bits */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005275 if (found_key.objectid != btrfs_ino(inode) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00005276 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05005277 /* have to trust i_size as the end */
5278 last = (u64)-1;
5279 last_for_get_extent = isize;
5280 } else {
5281 /*
5282 * remember the start of the last extent. There are a
5283 * bunch of different factors that go into the length of the
5284 * extent, so its much less complex to remember where it started
5285 */
5286 last = found_key.offset;
5287 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00005288 }
Liu Bofe09e162013-09-22 12:54:23 +08005289 btrfs_release_path(path);
Josef Bacik975f84f2010-11-23 19:36:57 +00005290
Chris Masonec29ed52011-02-23 16:23:20 -05005291 /*
5292 * we might have some extents allocated but more delalloc past those
5293 * extents. so, we trust isize unless the start of the last extent is
5294 * beyond isize
5295 */
5296 if (last < isize) {
5297 last = (u64)-1;
5298 last_for_get_extent = isize;
5299 }
5300
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005301 lock_extent_bits(&inode->io_tree, start, start + len - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01005302 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05005303
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005304 em = get_extent_skip_holes(inode, start, last_for_get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005305 if (!em)
5306 goto out;
5307 if (IS_ERR(em)) {
5308 ret = PTR_ERR(em);
5309 goto out;
5310 }
Josef Bacik975f84f2010-11-23 19:36:57 +00005311
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005312 while (!end) {
Josef Bacikb76bb702013-07-05 13:52:51 -04005313 u64 offset_in_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005314
Chris Masonea8efc72011-03-08 11:54:40 -05005315 /* break if the extent we found is outside the range */
5316 if (em->start >= max || extent_map_end(em) < off)
5317 break;
5318
5319 /*
5320 * get_extent may return an extent that starts before our
5321 * requested range. We have to make sure the ranges
5322 * we return to fiemap always move forward and don't
5323 * overlap, so adjust the offsets here
5324 */
5325 em_start = max(em->start, off);
5326
5327 /*
5328 * record the offset from the start of the extent
Josef Bacikb76bb702013-07-05 13:52:51 -04005329 * for adjusting the disk offset below. Only do this if the
5330 * extent isn't compressed since our in ram offset may be past
5331 * what we have actually allocated on disk.
Chris Masonea8efc72011-03-08 11:54:40 -05005332 */
Josef Bacikb76bb702013-07-05 13:52:51 -04005333 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5334 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05005335 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05005336 em_len = em_end - em_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005337 flags = 0;
Filipe Mananaf0986312018-06-20 10:02:30 +01005338 if (em->block_start < EXTENT_MAP_LAST_BYTE)
5339 disko = em->block_start + offset_in_extent;
5340 else
5341 disko = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005342
Chris Masonea8efc72011-03-08 11:54:40 -05005343 /*
5344 * bump off for our next call to get_extent
5345 */
5346 off = extent_map_end(em);
5347 if (off >= max)
5348 end = 1;
5349
Heiko Carstens93dbfad2009-04-03 10:33:45 -04005350 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005351 end = 1;
5352 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04005353 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005354 flags |= (FIEMAP_EXTENT_DATA_INLINE |
5355 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04005356 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005357 flags |= (FIEMAP_EXTENT_DELALLOC |
5358 FIEMAP_EXTENT_UNKNOWN);
Josef Bacikdc046b12014-09-10 16:20:45 -04005359 } else if (fieinfo->fi_extents_max) {
5360 u64 bytenr = em->block_start -
5361 (em->start - em->orig_start);
Liu Bofe09e162013-09-22 12:54:23 +08005362
Liu Bofe09e162013-09-22 12:54:23 +08005363 /*
5364 * As btrfs supports shared space, this information
5365 * can be exported to userspace tools via
Josef Bacikdc046b12014-09-10 16:20:45 -04005366 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
5367 * then we're just getting a count and we can skip the
5368 * lookup stuff.
Liu Bofe09e162013-09-22 12:54:23 +08005369 */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005370 ret = btrfs_check_shared(root, btrfs_ino(inode),
David Sterba5911c8f2019-05-15 15:31:04 +02005371 bytenr, roots, tmp_ulist);
Josef Bacikdc046b12014-09-10 16:20:45 -04005372 if (ret < 0)
Liu Bofe09e162013-09-22 12:54:23 +08005373 goto out_free;
Josef Bacikdc046b12014-09-10 16:20:45 -04005374 if (ret)
Liu Bofe09e162013-09-22 12:54:23 +08005375 flags |= FIEMAP_EXTENT_SHARED;
Josef Bacikdc046b12014-09-10 16:20:45 -04005376 ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005377 }
5378 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5379 flags |= FIEMAP_EXTENT_ENCODED;
Josef Bacik0d2b2372015-05-19 10:44:04 -04005380 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5381 flags |= FIEMAP_EXTENT_UNWRITTEN;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005382
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005383 free_extent_map(em);
5384 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05005385 if ((em_start >= last) || em_len == (u64)-1 ||
5386 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005387 flags |= FIEMAP_EXTENT_LAST;
5388 end = 1;
5389 }
5390
Chris Masonec29ed52011-02-23 16:23:20 -05005391 /* now scan forward to see if this is really the last extent. */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005392 em = get_extent_skip_holes(inode, off, last_for_get_extent);
Chris Masonec29ed52011-02-23 16:23:20 -05005393 if (IS_ERR(em)) {
5394 ret = PTR_ERR(em);
5395 goto out;
5396 }
5397 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00005398 flags |= FIEMAP_EXTENT_LAST;
5399 end = 1;
5400 }
Qu Wenruo47518322017-04-07 10:43:15 +08005401 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
5402 em_len, flags);
Chengyu Song26e726a2015-03-24 18:12:56 -04005403 if (ret) {
5404 if (ret == 1)
5405 ret = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05005406 goto out_free;
Chengyu Song26e726a2015-03-24 18:12:56 -04005407 }
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005408 }
5409out_free:
Qu Wenruo47518322017-04-07 10:43:15 +08005410 if (!ret)
David Sterba5c5aff92019-03-20 11:29:46 +01005411 ret = emit_last_fiemap_cache(fieinfo, &cache);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005412 free_extent_map(em);
5413out:
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005414 unlock_extent_cached(&inode->io_tree, start, start + len - 1,
David Sterbae43bbe52017-12-12 21:43:52 +01005415 &cached_state);
David Sterba5911c8f2019-05-15 15:31:04 +02005416
5417out_free_ulist:
Colin Ian Kinge02d48e2019-07-05 08:26:24 +01005418 btrfs_free_path(path);
David Sterba5911c8f2019-05-15 15:31:04 +02005419 ulist_free(roots);
5420 ulist_free(tmp_ulist);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005421 return ret;
5422}
5423
Chris Mason727011e2010-08-06 13:21:20 -04005424static void __free_extent_buffer(struct extent_buffer *eb)
5425{
Chris Mason727011e2010-08-06 13:21:20 -04005426 kmem_cache_free(extent_buffer_cache, eb);
5427}
5428
David Sterba2b489662020-04-29 03:04:10 +02005429int extent_buffer_under_io(const struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005430{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005431 return (atomic_read(&eb->io_pages) ||
5432 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
5433 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05005434}
5435
Qu Wenruo8ff84662021-01-26 16:33:50 +08005436static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
Miao Xie897ca6e92010-10-26 20:57:29 -04005437{
Qu Wenruo8ff84662021-01-26 16:33:50 +08005438 struct btrfs_subpage *subpage;
Miao Xie897ca6e92010-10-26 20:57:29 -04005439
Qu Wenruo8ff84662021-01-26 16:33:50 +08005440 lockdep_assert_held(&page->mapping->private_lock);
Miao Xie897ca6e92010-10-26 20:57:29 -04005441
Qu Wenruo8ff84662021-01-26 16:33:50 +08005442 if (PagePrivate(page)) {
5443 subpage = (struct btrfs_subpage *)page->private;
5444 if (atomic_read(&subpage->eb_refs))
5445 return true;
5446 }
5447 return false;
5448}
Miao Xie897ca6e92010-10-26 20:57:29 -04005449
Qu Wenruo8ff84662021-01-26 16:33:50 +08005450static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
5451{
5452 struct btrfs_fs_info *fs_info = eb->fs_info;
5453 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5454
5455 /*
5456 * For mapped eb, we're going to change the page private, which should
5457 * be done under the private_lock.
5458 */
5459 if (mapped)
5460 spin_lock(&page->mapping->private_lock);
5461
5462 if (!PagePrivate(page)) {
Forrest Liu5d2361d2015-02-09 17:31:45 +08005463 if (mapped)
Qu Wenruo8ff84662021-01-26 16:33:50 +08005464 spin_unlock(&page->mapping->private_lock);
5465 return;
5466 }
5467
5468 if (fs_info->sectorsize == PAGE_SIZE) {
Forrest Liu5d2361d2015-02-09 17:31:45 +08005469 /*
5470 * We do this since we'll remove the pages after we've
5471 * removed the eb from the radix tree, so we could race
5472 * and have this page now attached to the new eb. So
5473 * only clear page_private if it's still connected to
5474 * this eb.
5475 */
5476 if (PagePrivate(page) &&
5477 page->private == (unsigned long)eb) {
5478 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5479 BUG_ON(PageDirty(page));
5480 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005481 /*
Forrest Liu5d2361d2015-02-09 17:31:45 +08005482 * We need to make sure we haven't be attached
5483 * to a new eb.
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005484 */
Guoqing Jiangd1b89bc2020-06-01 21:47:45 -07005485 detach_page_private(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005486 }
Forrest Liu5d2361d2015-02-09 17:31:45 +08005487 if (mapped)
5488 spin_unlock(&page->mapping->private_lock);
Qu Wenruo8ff84662021-01-26 16:33:50 +08005489 return;
5490 }
5491
5492 /*
5493 * For subpage, we can have dummy eb with page private. In this case,
5494 * we can directly detach the private as such page is only attached to
5495 * one dummy eb, no sharing.
5496 */
5497 if (!mapped) {
5498 btrfs_detach_subpage(fs_info, page);
5499 return;
5500 }
5501
5502 btrfs_page_dec_eb_refs(fs_info, page);
5503
5504 /*
5505 * We can only detach the page private if there are no other ebs in the
5506 * page range.
5507 */
5508 if (!page_range_has_eb(fs_info, page))
5509 btrfs_detach_subpage(fs_info, page);
5510
5511 spin_unlock(&page->mapping->private_lock);
5512}
5513
5514/* Release all pages attached to the extent buffer */
5515static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
5516{
5517 int i;
5518 int num_pages;
5519
5520 ASSERT(!extent_buffer_under_io(eb));
5521
5522 num_pages = num_extent_pages(eb);
5523 for (i = 0; i < num_pages; i++) {
5524 struct page *page = eb->pages[i];
5525
5526 if (!page)
5527 continue;
5528
5529 detach_extent_buffer_page(eb, page);
Forrest Liu5d2361d2015-02-09 17:31:45 +08005530
Nicholas D Steeves01327612016-05-19 21:18:45 -04005531 /* One for when we allocated the page */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005532 put_page(page);
Nikolay Borisovd64766f2018-06-27 16:38:22 +03005533 }
Miao Xie897ca6e92010-10-26 20:57:29 -04005534}
5535
5536/*
5537 * Helper for releasing the extent buffer.
5538 */
5539static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
5540{
David Sterba55ac0132018-07-19 17:24:32 +02005541 btrfs_release_extent_buffer_pages(eb);
Josef Bacik8c389382020-02-14 16:11:42 -05005542 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
Miao Xie897ca6e92010-10-26 20:57:29 -04005543 __free_extent_buffer(eb);
5544}
5545
Josef Bacikf28491e2013-12-16 13:24:27 -05005546static struct extent_buffer *
5547__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
David Sterba23d79d82014-06-15 02:55:29 +02005548 unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04005549{
5550 struct extent_buffer *eb = NULL;
5551
Michal Hockod1b5c562015-08-19 14:17:40 +02005552 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005553 eb->start = start;
5554 eb->len = len;
Josef Bacikf28491e2013-12-16 13:24:27 -05005555 eb->fs_info = fs_info;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005556 eb->bflags = 0;
Josef Bacik196d59a2020-08-20 11:46:09 -04005557 init_rwsem(&eb->lock);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005558
Josef Bacik3fd63722020-02-14 16:11:40 -05005559 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
5560 &fs_info->allocated_ebs);
Naohiro Aotad35751562021-02-04 19:21:54 +09005561 INIT_LIST_HEAD(&eb->release_list);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005562
5563 spin_lock_init(&eb->refs_lock);
5564 atomic_set(&eb->refs, 1);
5565 atomic_set(&eb->io_pages, 0);
5566
Qu Wenruodeb67892020-12-02 14:48:01 +08005567 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005568
5569 return eb;
5570}
5571
David Sterba2b489662020-04-29 03:04:10 +02005572struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
Josef Bacikdb7f3432013-08-07 14:54:37 -04005573{
David Sterbacc5e31a2018-03-01 18:20:27 +01005574 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005575 struct page *p;
5576 struct extent_buffer *new;
David Sterbacc5e31a2018-03-01 18:20:27 +01005577 int num_pages = num_extent_pages(src);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005578
David Sterba3f556f72014-06-15 03:20:26 +02005579 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005580 if (new == NULL)
5581 return NULL;
5582
Qu Wenruo62c053f2021-01-26 16:33:46 +08005583 /*
5584 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
5585 * btrfs_release_extent_buffer() have different behavior for
5586 * UNMAPPED subpage extent buffer.
5587 */
5588 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5589
Josef Bacikdb7f3432013-08-07 14:54:37 -04005590 for (i = 0; i < num_pages; i++) {
Qu Wenruo760f9912021-01-26 16:33:48 +08005591 int ret;
5592
Josef Bacik9ec72672013-08-07 16:57:23 -04005593 p = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005594 if (!p) {
5595 btrfs_release_extent_buffer(new);
5596 return NULL;
5597 }
Qu Wenruo760f9912021-01-26 16:33:48 +08005598 ret = attach_extent_buffer_page(new, p, NULL);
5599 if (ret < 0) {
5600 put_page(p);
5601 btrfs_release_extent_buffer(new);
5602 return NULL;
5603 }
Josef Bacikdb7f3432013-08-07 14:54:37 -04005604 WARN_ON(PageDirty(p));
Josef Bacikdb7f3432013-08-07 14:54:37 -04005605 new->pages[i] = p;
David Sterbafba1acf2016-11-08 17:56:24 +01005606 copy_page(page_address(p), page_address(src->pages[i]));
Josef Bacikdb7f3432013-08-07 14:54:37 -04005607 }
Qu Wenruo92d83e92021-01-26 16:33:55 +08005608 set_extent_buffer_uptodate(new);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005609
5610 return new;
5611}
5612
Omar Sandoval0f331222015-09-29 20:50:31 -07005613struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5614 u64 start, unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04005615{
5616 struct extent_buffer *eb;
David Sterbacc5e31a2018-03-01 18:20:27 +01005617 int num_pages;
5618 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005619
David Sterba3f556f72014-06-15 03:20:26 +02005620 eb = __alloc_extent_buffer(fs_info, start, len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005621 if (!eb)
5622 return NULL;
5623
David Sterba65ad0102018-06-29 10:56:49 +02005624 num_pages = num_extent_pages(eb);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005625 for (i = 0; i < num_pages; i++) {
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005626 int ret;
5627
Josef Bacik9ec72672013-08-07 16:57:23 -04005628 eb->pages[i] = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005629 if (!eb->pages[i])
5630 goto err;
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005631 ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
5632 if (ret < 0)
5633 goto err;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005634 }
5635 set_extent_buffer_uptodate(eb);
5636 btrfs_set_header_nritems(eb, 0);
Nikolay Borisovb0132a32018-06-27 16:38:24 +03005637 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005638
5639 return eb;
5640err:
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005641 for (; i > 0; i--) {
5642 detach_extent_buffer_page(eb, eb->pages[i - 1]);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005643 __free_page(eb->pages[i - 1]);
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005644 }
Josef Bacikdb7f3432013-08-07 14:54:37 -04005645 __free_extent_buffer(eb);
5646 return NULL;
5647}
5648
Omar Sandoval0f331222015-09-29 20:50:31 -07005649struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04005650 u64 start)
Omar Sandoval0f331222015-09-29 20:50:31 -07005651{
Jeff Mahoneyda170662016-06-15 09:22:56 -04005652 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
Omar Sandoval0f331222015-09-29 20:50:31 -07005653}
5654
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005655static void check_buffer_tree_ref(struct extent_buffer *eb)
5656{
Chris Mason242e18c2013-01-29 17:49:37 -05005657 int refs;
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005658 /*
5659 * The TREE_REF bit is first set when the extent_buffer is added
5660 * to the radix tree. It is also reset, if unset, when a new reference
5661 * is created by find_extent_buffer.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005662 *
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005663 * It is only cleared in two cases: freeing the last non-tree
5664 * reference to the extent_buffer when its STALE bit is set or
5665 * calling releasepage when the tree reference is the only reference.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005666 *
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005667 * In both cases, care is taken to ensure that the extent_buffer's
5668 * pages are not under io. However, releasepage can be concurrently
5669 * called with creating new references, which is prone to race
5670 * conditions between the calls to check_buffer_tree_ref in those
5671 * codepaths and clearing TREE_REF in try_release_extent_buffer.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005672 *
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005673 * The actual lifetime of the extent_buffer in the radix tree is
5674 * adequately protected by the refcount, but the TREE_REF bit and
5675 * its corresponding reference are not. To protect against this
5676 * class of races, we call check_buffer_tree_ref from the codepaths
5677 * which trigger io after they set eb->io_pages. Note that once io is
5678 * initiated, TREE_REF can no longer be cleared, so that is the
5679 * moment at which any such race is best fixed.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005680 */
Chris Mason242e18c2013-01-29 17:49:37 -05005681 refs = atomic_read(&eb->refs);
5682 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5683 return;
5684
Josef Bacik594831c2012-07-20 16:11:08 -04005685 spin_lock(&eb->refs_lock);
5686 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005687 atomic_inc(&eb->refs);
Josef Bacik594831c2012-07-20 16:11:08 -04005688 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005689}
5690
Mel Gorman2457aec2014-06-04 16:10:31 -07005691static void mark_extent_buffer_accessed(struct extent_buffer *eb,
5692 struct page *accessed)
Josef Bacik5df42352012-03-15 18:24:42 -04005693{
David Sterbacc5e31a2018-03-01 18:20:27 +01005694 int num_pages, i;
Josef Bacik5df42352012-03-15 18:24:42 -04005695
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005696 check_buffer_tree_ref(eb);
5697
David Sterba65ad0102018-06-29 10:56:49 +02005698 num_pages = num_extent_pages(eb);
Josef Bacik5df42352012-03-15 18:24:42 -04005699 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005700 struct page *p = eb->pages[i];
5701
Mel Gorman2457aec2014-06-04 16:10:31 -07005702 if (p != accessed)
5703 mark_page_accessed(p);
Josef Bacik5df42352012-03-15 18:24:42 -04005704 }
5705}
5706
Josef Bacikf28491e2013-12-16 13:24:27 -05005707struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5708 u64 start)
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005709{
5710 struct extent_buffer *eb;
5711
Qu Wenruo2f3186d2021-04-06 08:36:00 +08005712 eb = find_extent_buffer_nolock(fs_info, start);
5713 if (!eb)
5714 return NULL;
5715 /*
5716 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
5717 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
5718 * another task running free_extent_buffer() might have seen that flag
5719 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
5720 * writeback flags not set) and it's still in the tree (flag
5721 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
5722 * decrementing the extent buffer's reference count twice. So here we
5723 * could race and increment the eb's reference count, clear its stale
5724 * flag, mark it as dirty and drop our reference before the other task
5725 * finishes executing free_extent_buffer, which would later result in
5726 * an attempt to free an extent buffer that is dirty.
5727 */
5728 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5729 spin_lock(&eb->refs_lock);
5730 spin_unlock(&eb->refs_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005731 }
Qu Wenruo2f3186d2021-04-06 08:36:00 +08005732 mark_extent_buffer_accessed(eb, NULL);
5733 return eb;
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005734}
5735
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005736#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5737struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04005738 u64 start)
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005739{
5740 struct extent_buffer *eb, *exists = NULL;
5741 int ret;
5742
5743 eb = find_extent_buffer(fs_info, start);
5744 if (eb)
5745 return eb;
Jeff Mahoneyda170662016-06-15 09:22:56 -04005746 eb = alloc_dummy_extent_buffer(fs_info, start);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005747 if (!eb)
Dan Carpenterb6293c82019-12-03 14:24:58 +03005748 return ERR_PTR(-ENOMEM);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005749 eb->fs_info = fs_info;
5750again:
David Sterbae1860a72016-05-09 14:11:38 +02005751 ret = radix_tree_preload(GFP_NOFS);
Dan Carpenterb6293c82019-12-03 14:24:58 +03005752 if (ret) {
5753 exists = ERR_PTR(ret);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005754 goto free_eb;
Dan Carpenterb6293c82019-12-03 14:24:58 +03005755 }
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005756 spin_lock(&fs_info->buffer_lock);
5757 ret = radix_tree_insert(&fs_info->buffer_radix,
Qu Wenruo478ef882020-10-21 14:25:05 +08005758 start >> fs_info->sectorsize_bits, eb);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005759 spin_unlock(&fs_info->buffer_lock);
5760 radix_tree_preload_end();
5761 if (ret == -EEXIST) {
5762 exists = find_extent_buffer(fs_info, start);
5763 if (exists)
5764 goto free_eb;
5765 else
5766 goto again;
5767 }
5768 check_buffer_tree_ref(eb);
5769 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5770
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005771 return eb;
5772free_eb:
5773 btrfs_release_extent_buffer(eb);
5774 return exists;
5775}
5776#endif
5777
Qu Wenruo819822102021-01-26 16:33:49 +08005778static struct extent_buffer *grab_extent_buffer(
5779 struct btrfs_fs_info *fs_info, struct page *page)
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08005780{
5781 struct extent_buffer *exists;
5782
Qu Wenruo819822102021-01-26 16:33:49 +08005783 /*
5784 * For subpage case, we completely rely on radix tree to ensure we
5785 * don't try to insert two ebs for the same bytenr. So here we always
5786 * return NULL and just continue.
5787 */
5788 if (fs_info->sectorsize < PAGE_SIZE)
5789 return NULL;
5790
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08005791 /* Page not yet attached to an extent buffer */
5792 if (!PagePrivate(page))
5793 return NULL;
5794
5795 /*
5796 * We could have already allocated an eb for this page and attached one
5797 * so lets see if we can get a ref on the existing eb, and if we can we
5798 * know it's good and we can just return that one, else we know we can
5799 * just overwrite page->private.
5800 */
5801 exists = (struct extent_buffer *)page->private;
5802 if (atomic_inc_not_zero(&exists->refs))
5803 return exists;
5804
5805 WARN_ON(PageDirty(page));
5806 detach_page_private(page);
5807 return NULL;
5808}
5809
Josef Bacikf28491e2013-12-16 13:24:27 -05005810struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
Josef Bacik3fbaf252020-11-05 10:45:20 -05005811 u64 start, u64 owner_root, int level)
Chris Masond1310b22008-01-24 16:13:08 -05005812{
Jeff Mahoneyda170662016-06-15 09:22:56 -04005813 unsigned long len = fs_info->nodesize;
David Sterbacc5e31a2018-03-01 18:20:27 +01005814 int num_pages;
5815 int i;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005816 unsigned long index = start >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005817 struct extent_buffer *eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005818 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05005819 struct page *p;
Josef Bacikf28491e2013-12-16 13:24:27 -05005820 struct address_space *mapping = fs_info->btree_inode->i_mapping;
Chris Masond1310b22008-01-24 16:13:08 -05005821 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04005822 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05005823
Jeff Mahoneyda170662016-06-15 09:22:56 -04005824 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
Liu Boc871b0f2016-06-06 12:01:23 -07005825 btrfs_err(fs_info, "bad tree block start %llu", start);
5826 return ERR_PTR(-EINVAL);
5827 }
5828
Qu Wenruoe9306ad2021-02-25 09:18:14 +08005829#if BITS_PER_LONG == 32
5830 if (start >= MAX_LFS_FILESIZE) {
5831 btrfs_err_rl(fs_info,
5832 "extent buffer %llu is beyond 32bit page cache limit", start);
5833 btrfs_err_32bit_limit(fs_info);
5834 return ERR_PTR(-EOVERFLOW);
5835 }
5836 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
5837 btrfs_warn_32bit_limit(fs_info);
5838#endif
5839
Qu Wenruo1aaac382020-12-02 14:48:02 +08005840 if (fs_info->sectorsize < PAGE_SIZE &&
5841 offset_in_page(start) + len > PAGE_SIZE) {
5842 btrfs_err(fs_info,
5843 "tree block crosses page boundary, start %llu nodesize %lu",
5844 start, len);
5845 return ERR_PTR(-EINVAL);
5846 }
5847
Josef Bacikf28491e2013-12-16 13:24:27 -05005848 eb = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005849 if (eb)
Chris Mason6af118ce2008-07-22 11:18:07 -04005850 return eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005851
David Sterba23d79d82014-06-15 02:55:29 +02005852 eb = __alloc_extent_buffer(fs_info, start, len);
Peter2b114d12008-04-01 11:21:40 -04005853 if (!eb)
Liu Boc871b0f2016-06-06 12:01:23 -07005854 return ERR_PTR(-ENOMEM);
Josef Bacike114c542020-11-05 10:45:21 -05005855 btrfs_set_buffer_lockdep_class(owner_root, eb, level);
Chris Masond1310b22008-01-24 16:13:08 -05005856
David Sterba65ad0102018-06-29 10:56:49 +02005857 num_pages = num_extent_pages(eb);
Chris Mason727011e2010-08-06 13:21:20 -04005858 for (i = 0; i < num_pages; i++, index++) {
Qu Wenruo760f9912021-01-26 16:33:48 +08005859 struct btrfs_subpage *prealloc = NULL;
5860
Michal Hockod1b5c562015-08-19 14:17:40 +02005861 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
Liu Boc871b0f2016-06-06 12:01:23 -07005862 if (!p) {
5863 exists = ERR_PTR(-ENOMEM);
Chris Mason6af118ce2008-07-22 11:18:07 -04005864 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07005865 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005866
Qu Wenruo760f9912021-01-26 16:33:48 +08005867 /*
5868 * Preallocate page->private for subpage case, so that we won't
5869 * allocate memory with private_lock hold. The memory will be
5870 * freed by attach_extent_buffer_page() or freed manually if
5871 * we exit earlier.
5872 *
5873 * Although we have ensured one subpage eb can only have one
5874 * page, but it may change in the future for 16K page size
5875 * support, so we still preallocate the memory in the loop.
5876 */
5877 ret = btrfs_alloc_subpage(fs_info, &prealloc,
5878 BTRFS_SUBPAGE_METADATA);
5879 if (ret < 0) {
5880 unlock_page(p);
5881 put_page(p);
5882 exists = ERR_PTR(ret);
5883 goto free_eb;
5884 }
5885
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005886 spin_lock(&mapping->private_lock);
Qu Wenruo819822102021-01-26 16:33:49 +08005887 exists = grab_extent_buffer(fs_info, p);
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08005888 if (exists) {
5889 spin_unlock(&mapping->private_lock);
5890 unlock_page(p);
5891 put_page(p);
5892 mark_extent_buffer_accessed(exists, p);
Qu Wenruo760f9912021-01-26 16:33:48 +08005893 btrfs_free_subpage(prealloc);
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08005894 goto free_eb;
Chris Masond1310b22008-01-24 16:13:08 -05005895 }
Qu Wenruo760f9912021-01-26 16:33:48 +08005896 /* Should not fail, as we have preallocated the memory */
5897 ret = attach_extent_buffer_page(eb, p, prealloc);
5898 ASSERT(!ret);
Qu Wenruo8ff84662021-01-26 16:33:50 +08005899 /*
5900 * To inform we have extra eb under allocation, so that
5901 * detach_extent_buffer_page() won't release the page private
5902 * when the eb hasn't yet been inserted into radix tree.
5903 *
5904 * The ref will be decreased when the eb released the page, in
5905 * detach_extent_buffer_page().
5906 * Thus needs no special handling in error path.
5907 */
5908 btrfs_page_inc_eb_refs(fs_info, p);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005909 spin_unlock(&mapping->private_lock);
Qu Wenruo760f9912021-01-26 16:33:48 +08005910
Qu Wenruo1e5eb3d2021-03-25 15:14:41 +08005911 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
Chris Mason727011e2010-08-06 13:21:20 -04005912 eb->pages[i] = p;
Chris Masond1310b22008-01-24 16:13:08 -05005913 if (!PageUptodate(p))
5914 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05005915
5916 /*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03005917 * We can't unlock the pages just yet since the extent buffer
5918 * hasn't been properly inserted in the radix tree, this
5919 * opens a race with btree_releasepage which can free a page
5920 * while we are still filling in all pages for the buffer and
5921 * we could crash.
Chris Masoneb14ab82011-02-10 12:35:00 -05005922 */
Chris Masond1310b22008-01-24 16:13:08 -05005923 }
5924 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05005925 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik115391d2012-03-09 09:51:43 -05005926again:
David Sterbae1860a72016-05-09 14:11:38 +02005927 ret = radix_tree_preload(GFP_NOFS);
Liu Boc871b0f2016-06-06 12:01:23 -07005928 if (ret) {
5929 exists = ERR_PTR(ret);
Miao Xie19fe0a82010-10-26 20:57:29 -04005930 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07005931 }
Miao Xie19fe0a82010-10-26 20:57:29 -04005932
Josef Bacikf28491e2013-12-16 13:24:27 -05005933 spin_lock(&fs_info->buffer_lock);
5934 ret = radix_tree_insert(&fs_info->buffer_radix,
Qu Wenruo478ef882020-10-21 14:25:05 +08005935 start >> fs_info->sectorsize_bits, eb);
Josef Bacikf28491e2013-12-16 13:24:27 -05005936 spin_unlock(&fs_info->buffer_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005937 radix_tree_preload_end();
Miao Xie19fe0a82010-10-26 20:57:29 -04005938 if (ret == -EEXIST) {
Josef Bacikf28491e2013-12-16 13:24:27 -05005939 exists = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005940 if (exists)
5941 goto free_eb;
5942 else
Josef Bacik115391d2012-03-09 09:51:43 -05005943 goto again;
Chris Mason6af118ce2008-07-22 11:18:07 -04005944 }
Chris Mason6af118ce2008-07-22 11:18:07 -04005945 /* add one reference for the tree */
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005946 check_buffer_tree_ref(eb);
Josef Bacik34b41ac2013-12-13 10:41:51 -05005947 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
Chris Masoneb14ab82011-02-10 12:35:00 -05005948
5949 /*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03005950 * Now it's safe to unlock the pages because any calls to
5951 * btree_releasepage will correctly detect that a page belongs to a
5952 * live buffer and won't free them prematurely.
Chris Masoneb14ab82011-02-10 12:35:00 -05005953 */
Nikolay Borisov28187ae2018-07-04 10:24:51 +03005954 for (i = 0; i < num_pages; i++)
5955 unlock_page(eb->pages[i]);
Chris Masond1310b22008-01-24 16:13:08 -05005956 return eb;
5957
Chris Mason6af118ce2008-07-22 11:18:07 -04005958free_eb:
Omar Sandoval5ca64f42015-02-24 02:47:05 -08005959 WARN_ON(!atomic_dec_and_test(&eb->refs));
Chris Mason727011e2010-08-06 13:21:20 -04005960 for (i = 0; i < num_pages; i++) {
5961 if (eb->pages[i])
5962 unlock_page(eb->pages[i]);
5963 }
Chris Masoneb14ab82011-02-10 12:35:00 -05005964
Miao Xie897ca6e92010-10-26 20:57:29 -04005965 btrfs_release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04005966 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05005967}
Chris Masond1310b22008-01-24 16:13:08 -05005968
Josef Bacik3083ee22012-03-09 16:01:49 -05005969static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5970{
5971 struct extent_buffer *eb =
5972 container_of(head, struct extent_buffer, rcu_head);
5973
5974 __free_extent_buffer(eb);
5975}
5976
David Sterbaf7a52a42013-04-26 14:56:29 +00005977static int release_extent_buffer(struct extent_buffer *eb)
Jules Irenge5ce48d02020-02-23 23:16:42 +00005978 __releases(&eb->refs_lock)
Josef Bacik3083ee22012-03-09 16:01:49 -05005979{
Nikolay Borisov07e21c42018-06-27 16:38:23 +03005980 lockdep_assert_held(&eb->refs_lock);
5981
Josef Bacik3083ee22012-03-09 16:01:49 -05005982 WARN_ON(atomic_read(&eb->refs) == 0);
5983 if (atomic_dec_and_test(&eb->refs)) {
Josef Bacik34b41ac2013-12-13 10:41:51 -05005984 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
Josef Bacikf28491e2013-12-16 13:24:27 -05005985 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacik3083ee22012-03-09 16:01:49 -05005986
Jan Schmidt815a51c2012-05-16 17:00:02 +02005987 spin_unlock(&eb->refs_lock);
Josef Bacik3083ee22012-03-09 16:01:49 -05005988
Josef Bacikf28491e2013-12-16 13:24:27 -05005989 spin_lock(&fs_info->buffer_lock);
5990 radix_tree_delete(&fs_info->buffer_radix,
Qu Wenruo478ef882020-10-21 14:25:05 +08005991 eb->start >> fs_info->sectorsize_bits);
Josef Bacikf28491e2013-12-16 13:24:27 -05005992 spin_unlock(&fs_info->buffer_lock);
Josef Bacik34b41ac2013-12-13 10:41:51 -05005993 } else {
5994 spin_unlock(&eb->refs_lock);
Jan Schmidt815a51c2012-05-16 17:00:02 +02005995 }
Josef Bacik3083ee22012-03-09 16:01:49 -05005996
Josef Bacik8c389382020-02-14 16:11:42 -05005997 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
Josef Bacik3083ee22012-03-09 16:01:49 -05005998 /* Should be safe to release our pages at this point */
David Sterba55ac0132018-07-19 17:24:32 +02005999 btrfs_release_extent_buffer_pages(eb);
Josef Bacikbcb7e442015-03-16 17:38:02 -04006000#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
Nikolay Borisovb0132a32018-06-27 16:38:24 +03006001 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
Josef Bacikbcb7e442015-03-16 17:38:02 -04006002 __free_extent_buffer(eb);
6003 return 1;
6004 }
6005#endif
Josef Bacik3083ee22012-03-09 16:01:49 -05006006 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
Josef Bacike64860a2012-07-20 16:05:36 -04006007 return 1;
Josef Bacik3083ee22012-03-09 16:01:49 -05006008 }
6009 spin_unlock(&eb->refs_lock);
Josef Bacike64860a2012-07-20 16:05:36 -04006010
6011 return 0;
Josef Bacik3083ee22012-03-09 16:01:49 -05006012}
6013
Chris Masond1310b22008-01-24 16:13:08 -05006014void free_extent_buffer(struct extent_buffer *eb)
6015{
Chris Mason242e18c2013-01-29 17:49:37 -05006016 int refs;
6017 int old;
Chris Masond1310b22008-01-24 16:13:08 -05006018 if (!eb)
6019 return;
6020
Chris Mason242e18c2013-01-29 17:49:37 -05006021 while (1) {
6022 refs = atomic_read(&eb->refs);
Nikolay Borisov46cc7752018-10-15 17:04:01 +03006023 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
6024 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
6025 refs == 1))
Chris Mason242e18c2013-01-29 17:49:37 -05006026 break;
6027 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
6028 if (old == refs)
6029 return;
6030 }
6031
Josef Bacik3083ee22012-03-09 16:01:49 -05006032 spin_lock(&eb->refs_lock);
6033 if (atomic_read(&eb->refs) == 2 &&
6034 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006035 !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05006036 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6037 atomic_dec(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05006038
Josef Bacik3083ee22012-03-09 16:01:49 -05006039 /*
6040 * I know this is terrible, but it's temporary until we stop tracking
6041 * the uptodate bits and such for the extent buffers.
6042 */
David Sterbaf7a52a42013-04-26 14:56:29 +00006043 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006044}
Chris Masond1310b22008-01-24 16:13:08 -05006045
Josef Bacik3083ee22012-03-09 16:01:49 -05006046void free_extent_buffer_stale(struct extent_buffer *eb)
6047{
6048 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05006049 return;
6050
Josef Bacik3083ee22012-03-09 16:01:49 -05006051 spin_lock(&eb->refs_lock);
6052 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
6053
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006054 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05006055 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6056 atomic_dec(&eb->refs);
David Sterbaf7a52a42013-04-26 14:56:29 +00006057 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006058}
6059
Qu Wenruo0d277972021-03-25 15:14:43 +08006060static void btree_clear_page_dirty(struct page *page)
6061{
6062 ASSERT(PageDirty(page));
6063 ASSERT(PageLocked(page));
6064 clear_page_dirty_for_io(page);
6065 xa_lock_irq(&page->mapping->i_pages);
6066 if (!PageDirty(page))
6067 __xa_clear_mark(&page->mapping->i_pages,
6068 page_index(page), PAGECACHE_TAG_DIRTY);
6069 xa_unlock_irq(&page->mapping->i_pages);
6070}
6071
6072static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
6073{
6074 struct btrfs_fs_info *fs_info = eb->fs_info;
6075 struct page *page = eb->pages[0];
6076 bool last;
6077
6078 /* btree_clear_page_dirty() needs page locked */
6079 lock_page(page);
6080 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
6081 eb->len);
6082 if (last)
6083 btree_clear_page_dirty(page);
6084 unlock_page(page);
6085 WARN_ON(atomic_read(&eb->refs) == 0);
6086}
6087
David Sterba2b489662020-04-29 03:04:10 +02006088void clear_extent_buffer_dirty(const struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05006089{
David Sterbacc5e31a2018-03-01 18:20:27 +01006090 int i;
6091 int num_pages;
Chris Masond1310b22008-01-24 16:13:08 -05006092 struct page *page;
6093
Qu Wenruo0d277972021-03-25 15:14:43 +08006094 if (eb->fs_info->sectorsize < PAGE_SIZE)
6095 return clear_subpage_extent_buffer_dirty(eb);
6096
David Sterba65ad0102018-06-29 10:56:49 +02006097 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006098
6099 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006100 page = eb->pages[i];
Chris Masonb9473432009-03-13 11:00:37 -04006101 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05006102 continue;
Chris Masona61e6f22008-07-22 11:18:08 -04006103 lock_page(page);
Qu Wenruo0d277972021-03-25 15:14:43 +08006104 btree_clear_page_dirty(page);
Chris Masonbf0da8c2011-11-04 12:29:37 -04006105 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04006106 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05006107 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006108 WARN_ON(atomic_read(&eb->refs) == 0);
Chris Masond1310b22008-01-24 16:13:08 -05006109}
Chris Masond1310b22008-01-24 16:13:08 -05006110
Liu Boabb57ef2018-09-14 01:44:42 +08006111bool set_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05006112{
David Sterbacc5e31a2018-03-01 18:20:27 +01006113 int i;
6114 int num_pages;
Liu Boabb57ef2018-09-14 01:44:42 +08006115 bool was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05006116
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006117 check_buffer_tree_ref(eb);
6118
Chris Masonb9473432009-03-13 11:00:37 -04006119 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006120
David Sterba65ad0102018-06-29 10:56:49 +02006121 num_pages = num_extent_pages(eb);
Josef Bacik3083ee22012-03-09 16:01:49 -05006122 WARN_ON(atomic_read(&eb->refs) == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006123 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
6124
Qu Wenruo0d277972021-03-25 15:14:43 +08006125 if (!was_dirty) {
6126 bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
Liu Bo51995c32018-09-14 01:46:08 +08006127
Qu Wenruo0d277972021-03-25 15:14:43 +08006128 /*
6129 * For subpage case, we can have other extent buffers in the
6130 * same page, and in clear_subpage_extent_buffer_dirty() we
6131 * have to clear page dirty without subpage lock held.
6132 * This can cause race where our page gets dirty cleared after
6133 * we just set it.
6134 *
6135 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
6136 * its page for other reasons, we can use page lock to prevent
6137 * the above race.
6138 */
6139 if (subpage)
6140 lock_page(eb->pages[0]);
6141 for (i = 0; i < num_pages; i++)
6142 btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
6143 eb->start, eb->len);
6144 if (subpage)
6145 unlock_page(eb->pages[0]);
6146 }
Liu Bo51995c32018-09-14 01:46:08 +08006147#ifdef CONFIG_BTRFS_DEBUG
6148 for (i = 0; i < num_pages; i++)
6149 ASSERT(PageDirty(eb->pages[i]));
6150#endif
6151
Chris Masonb9473432009-03-13 11:00:37 -04006152 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05006153}
Chris Masond1310b22008-01-24 16:13:08 -05006154
David Sterba69ba3922015-12-03 13:08:59 +01006155void clear_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Mason1259ab72008-05-12 13:39:03 -04006156{
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006157 struct btrfs_fs_info *fs_info = eb->fs_info;
Chris Mason1259ab72008-05-12 13:39:03 -04006158 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01006159 int num_pages;
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006160 int i;
Chris Mason1259ab72008-05-12 13:39:03 -04006161
Chris Masonb4ce94d2009-02-04 09:25:08 -05006162 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02006163 num_pages = num_extent_pages(eb);
Chris Mason1259ab72008-05-12 13:39:03 -04006164 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006165 page = eb->pages[i];
Chris Mason33958dc2008-07-30 10:29:12 -04006166 if (page)
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006167 btrfs_page_clear_uptodate(fs_info, page,
6168 eb->start, eb->len);
Chris Mason1259ab72008-05-12 13:39:03 -04006169 }
Chris Mason1259ab72008-05-12 13:39:03 -04006170}
6171
David Sterba09c25a82015-12-03 13:08:59 +01006172void set_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05006173{
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006174 struct btrfs_fs_info *fs_info = eb->fs_info;
Chris Masond1310b22008-01-24 16:13:08 -05006175 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01006176 int num_pages;
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006177 int i;
Chris Masond1310b22008-01-24 16:13:08 -05006178
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006179 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02006180 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006181 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006182 page = eb->pages[i];
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006183 btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
Chris Masond1310b22008-01-24 16:13:08 -05006184 }
Chris Masond1310b22008-01-24 16:13:08 -05006185}
Chris Masond1310b22008-01-24 16:13:08 -05006186
Qu Wenruo4012daf2021-01-26 16:33:57 +08006187static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
6188 int mirror_num)
6189{
6190 struct btrfs_fs_info *fs_info = eb->fs_info;
6191 struct extent_io_tree *io_tree;
6192 struct page *page = eb->pages[0];
6193 struct bio *bio = NULL;
6194 int ret = 0;
6195
6196 ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
6197 ASSERT(PagePrivate(page));
6198 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
6199
6200 if (wait == WAIT_NONE) {
6201 ret = try_lock_extent(io_tree, eb->start,
6202 eb->start + eb->len - 1);
6203 if (ret <= 0)
6204 return ret;
6205 } else {
6206 ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6207 if (ret < 0)
6208 return ret;
6209 }
6210
6211 ret = 0;
6212 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
6213 PageUptodate(page) ||
6214 btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
6215 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6216 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6217 return ret;
6218 }
6219
6220 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6221 eb->read_mirror = 0;
6222 atomic_set(&eb->io_pages, 1);
6223 check_buffer_tree_ref(eb);
6224 btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
6225
6226 ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, page, eb->start,
6227 eb->len, eb->start - page_offset(page), &bio,
6228 end_bio_extent_readpage, mirror_num, 0, 0,
6229 true);
6230 if (ret) {
6231 /*
6232 * In the endio function, if we hit something wrong we will
6233 * increase the io_pages, so here we need to decrease it for
6234 * error path.
6235 */
6236 atomic_dec(&eb->io_pages);
6237 }
6238 if (bio) {
6239 int tmp;
6240
6241 tmp = submit_one_bio(bio, mirror_num, 0);
6242 if (tmp < 0)
6243 return tmp;
6244 }
6245 if (ret || wait != WAIT_COMPLETE)
6246 return ret;
6247
6248 wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
6249 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6250 ret = -EIO;
6251 return ret;
6252}
6253
Nikolay Borisovc2ccfbc2019-04-10 17:24:40 +03006254int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05006255{
David Sterbacc5e31a2018-03-01 18:20:27 +01006256 int i;
Chris Masond1310b22008-01-24 16:13:08 -05006257 struct page *page;
6258 int err;
6259 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04006260 int locked_pages = 0;
6261 int all_uptodate = 1;
David Sterbacc5e31a2018-03-01 18:20:27 +01006262 int num_pages;
Chris Mason727011e2010-08-06 13:21:20 -04006263 unsigned long num_reads = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05006264 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04006265 unsigned long bio_flags = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05006266
Chris Masonb4ce94d2009-02-04 09:25:08 -05006267 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05006268 return 0;
6269
Qu Wenruo4012daf2021-01-26 16:33:57 +08006270 if (eb->fs_info->sectorsize < PAGE_SIZE)
6271 return read_extent_buffer_subpage(eb, wait, mirror_num);
6272
David Sterba65ad0102018-06-29 10:56:49 +02006273 num_pages = num_extent_pages(eb);
Josef Bacik8436ea912016-09-02 15:40:03 -04006274 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006275 page = eb->pages[i];
Arne Jansenbb82ab82011-06-10 14:06:53 +02006276 if (wait == WAIT_NONE) {
Qu Wenruo2c4d8cb2021-01-28 19:25:08 +08006277 /*
6278 * WAIT_NONE is only utilized by readahead. If we can't
6279 * acquire the lock atomically it means either the eb
6280 * is being read out or under modification.
6281 * Either way the eb will be or has been cached,
6282 * readahead can exit safely.
6283 */
David Woodhouse2db04962008-08-07 11:19:43 -04006284 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04006285 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05006286 } else {
6287 lock_page(page);
6288 }
Chris Masonce9adaa2008-04-09 16:28:12 -04006289 locked_pages++;
Liu Bo2571e732016-08-03 12:33:01 -07006290 }
6291 /*
6292 * We need to firstly lock all pages to make sure that
6293 * the uptodate bit of our pages won't be affected by
6294 * clear_extent_buffer_uptodate().
6295 */
Josef Bacik8436ea912016-09-02 15:40:03 -04006296 for (i = 0; i < num_pages; i++) {
Liu Bo2571e732016-08-03 12:33:01 -07006297 page = eb->pages[i];
Chris Mason727011e2010-08-06 13:21:20 -04006298 if (!PageUptodate(page)) {
6299 num_reads++;
Chris Masonce9adaa2008-04-09 16:28:12 -04006300 all_uptodate = 0;
Chris Mason727011e2010-08-06 13:21:20 -04006301 }
Chris Masonce9adaa2008-04-09 16:28:12 -04006302 }
Liu Bo2571e732016-08-03 12:33:01 -07006303
Chris Masonce9adaa2008-04-09 16:28:12 -04006304 if (all_uptodate) {
Josef Bacik8436ea912016-09-02 15:40:03 -04006305 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04006306 goto unlock_exit;
6307 }
6308
Filipe Manana656f30d2014-09-26 12:25:56 +01006309 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
Josef Bacik5cf1ab52012-04-16 09:42:26 -04006310 eb->read_mirror = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006311 atomic_set(&eb->io_pages, num_reads);
Boris Burkov6bf9cd22020-06-17 11:35:19 -07006312 /*
6313 * It is possible for releasepage to clear the TREE_REF bit before we
6314 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
6315 */
6316 check_buffer_tree_ref(eb);
Josef Bacik8436ea912016-09-02 15:40:03 -04006317 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006318 page = eb->pages[i];
Liu Bobaf863b2016-07-11 10:39:07 -07006319
Chris Masonce9adaa2008-04-09 16:28:12 -04006320 if (!PageUptodate(page)) {
Liu Bobaf863b2016-07-11 10:39:07 -07006321 if (ret) {
6322 atomic_dec(&eb->io_pages);
6323 unlock_page(page);
6324 continue;
6325 }
6326
Chris Masonf1885912008-04-09 16:28:12 -04006327 ClearPageError(page);
Nikolay Borisov04201772020-09-14 12:37:04 +03006328 err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
6329 page, page_offset(page), PAGE_SIZE, 0,
6330 &bio, end_bio_extent_readpage,
6331 mirror_num, 0, 0, false);
Liu Bobaf863b2016-07-11 10:39:07 -07006332 if (err) {
Liu Bobaf863b2016-07-11 10:39:07 -07006333 /*
Nikolay Borisov04201772020-09-14 12:37:04 +03006334 * We failed to submit the bio so it's the
6335 * caller's responsibility to perform cleanup
6336 * i.e unlock page/set error bit.
Liu Bobaf863b2016-07-11 10:39:07 -07006337 */
Nikolay Borisov04201772020-09-14 12:37:04 +03006338 ret = err;
6339 SetPageError(page);
6340 unlock_page(page);
Liu Bobaf863b2016-07-11 10:39:07 -07006341 atomic_dec(&eb->io_pages);
6342 }
Chris Masond1310b22008-01-24 16:13:08 -05006343 } else {
6344 unlock_page(page);
6345 }
6346 }
6347
Jeff Mahoney355808c2011-10-03 23:23:14 -04006348 if (bio) {
Mike Christie1f7ad752016-06-05 14:31:51 -05006349 err = submit_one_bio(bio, mirror_num, bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01006350 if (err)
6351 return err;
Jeff Mahoney355808c2011-10-03 23:23:14 -04006352 }
Chris Masona86c12c2008-02-07 10:50:54 -05006353
Arne Jansenbb82ab82011-06-10 14:06:53 +02006354 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05006355 return ret;
Chris Masond3977122009-01-05 21:25:51 -05006356
Josef Bacik8436ea912016-09-02 15:40:03 -04006357 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006358 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05006359 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05006360 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05006361 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05006362 }
Chris Masond3977122009-01-05 21:25:51 -05006363
Chris Masond1310b22008-01-24 16:13:08 -05006364 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04006365
6366unlock_exit:
Chris Masond3977122009-01-05 21:25:51 -05006367 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04006368 locked_pages--;
Josef Bacik8436ea912016-09-02 15:40:03 -04006369 page = eb->pages[locked_pages];
6370 unlock_page(page);
Chris Masonce9adaa2008-04-09 16:28:12 -04006371 }
6372 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05006373}
Chris Masond1310b22008-01-24 16:13:08 -05006374
Qu Wenruof98b6212020-08-19 14:35:47 +08006375static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
6376 unsigned long len)
6377{
6378 btrfs_warn(eb->fs_info,
6379 "access to eb bytenr %llu len %lu out of range start %lu len %lu",
6380 eb->start, eb->len, start, len);
6381 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
6382
6383 return true;
6384}
6385
6386/*
6387 * Check if the [start, start + len) range is valid before reading/writing
6388 * the eb.
6389 * NOTE: @start and @len are offset inside the eb, not logical address.
6390 *
6391 * Caller should not touch the dst/src memory if this function returns error.
6392 */
6393static inline int check_eb_range(const struct extent_buffer *eb,
6394 unsigned long start, unsigned long len)
6395{
6396 unsigned long offset;
6397
6398 /* start, start + len should not go beyond eb->len nor overflow */
6399 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
6400 return report_eb_range(eb, start, len);
6401
6402 return false;
6403}
6404
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06006405void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
6406 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006407{
6408 size_t cur;
6409 size_t offset;
6410 struct page *page;
6411 char *kaddr;
6412 char *dst = (char *)dstv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006413 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006414
Qu Wenruof98b6212020-08-19 14:35:47 +08006415 if (check_eb_range(eb, start, len))
Liu Bof716abd2017-08-09 11:10:16 -06006416 return;
Chris Masond1310b22008-01-24 16:13:08 -05006417
Qu Wenruo884b07d2020-12-02 14:48:04 +08006418 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006419
Chris Masond3977122009-01-05 21:25:51 -05006420 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006421 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05006422
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006423 cur = min(len, (PAGE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04006424 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006425 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006426
6427 dst += cur;
6428 len -= cur;
6429 offset = 0;
6430 i++;
6431 }
6432}
Chris Masond1310b22008-01-24 16:13:08 -05006433
Josef Bacika48b73e2020-08-10 11:42:27 -04006434int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
6435 void __user *dstv,
6436 unsigned long start, unsigned long len)
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006437{
6438 size_t cur;
6439 size_t offset;
6440 struct page *page;
6441 char *kaddr;
6442 char __user *dst = (char __user *)dstv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006443 unsigned long i = get_eb_page_index(start);
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006444 int ret = 0;
6445
6446 WARN_ON(start > eb->len);
6447 WARN_ON(start + len > eb->start + eb->len);
6448
Qu Wenruo884b07d2020-12-02 14:48:04 +08006449 offset = get_eb_offset_in_page(eb, start);
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006450
6451 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006452 page = eb->pages[i];
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006453
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006454 cur = min(len, (PAGE_SIZE - offset));
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006455 kaddr = page_address(page);
Josef Bacika48b73e2020-08-10 11:42:27 -04006456 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006457 ret = -EFAULT;
6458 break;
6459 }
6460
6461 dst += cur;
6462 len -= cur;
6463 offset = 0;
6464 i++;
6465 }
6466
6467 return ret;
6468}
6469
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06006470int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
6471 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006472{
6473 size_t cur;
6474 size_t offset;
6475 struct page *page;
6476 char *kaddr;
6477 char *ptr = (char *)ptrv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006478 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006479 int ret = 0;
6480
Qu Wenruof98b6212020-08-19 14:35:47 +08006481 if (check_eb_range(eb, start, len))
6482 return -EINVAL;
Chris Masond1310b22008-01-24 16:13:08 -05006483
Qu Wenruo884b07d2020-12-02 14:48:04 +08006484 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006485
Chris Masond3977122009-01-05 21:25:51 -05006486 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006487 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05006488
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006489 cur = min(len, (PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05006490
Chris Masona6591712011-07-19 12:04:14 -04006491 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006492 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006493 if (ret)
6494 break;
6495
6496 ptr += cur;
6497 len -= cur;
6498 offset = 0;
6499 i++;
6500 }
6501 return ret;
6502}
Chris Masond1310b22008-01-24 16:13:08 -05006503
Qu Wenruob8f95772021-03-25 15:14:42 +08006504/*
6505 * Check that the extent buffer is uptodate.
6506 *
6507 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
6508 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
6509 */
6510static void assert_eb_page_uptodate(const struct extent_buffer *eb,
6511 struct page *page)
6512{
6513 struct btrfs_fs_info *fs_info = eb->fs_info;
6514
6515 if (fs_info->sectorsize < PAGE_SIZE) {
6516 bool uptodate;
6517
6518 uptodate = btrfs_subpage_test_uptodate(fs_info, page,
6519 eb->start, eb->len);
6520 WARN_ON(!uptodate);
6521 } else {
6522 WARN_ON(!PageUptodate(page));
6523 }
6524}
6525
David Sterba2b489662020-04-29 03:04:10 +02006526void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
David Sterbaf157bf72016-11-09 17:43:38 +01006527 const void *srcv)
6528{
6529 char *kaddr;
6530
Qu Wenruob8f95772021-03-25 15:14:42 +08006531 assert_eb_page_uptodate(eb, eb->pages[0]);
Qu Wenruo884b07d2020-12-02 14:48:04 +08006532 kaddr = page_address(eb->pages[0]) + get_eb_offset_in_page(eb, 0);
David Sterbaf157bf72016-11-09 17:43:38 +01006533 memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
6534 BTRFS_FSID_SIZE);
6535}
6536
David Sterba2b489662020-04-29 03:04:10 +02006537void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
David Sterbaf157bf72016-11-09 17:43:38 +01006538{
6539 char *kaddr;
6540
Qu Wenruob8f95772021-03-25 15:14:42 +08006541 assert_eb_page_uptodate(eb, eb->pages[0]);
Qu Wenruo884b07d2020-12-02 14:48:04 +08006542 kaddr = page_address(eb->pages[0]) + get_eb_offset_in_page(eb, 0);
David Sterbaf157bf72016-11-09 17:43:38 +01006543 memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
6544 BTRFS_FSID_SIZE);
6545}
6546
David Sterba2b489662020-04-29 03:04:10 +02006547void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
Chris Masond1310b22008-01-24 16:13:08 -05006548 unsigned long start, unsigned long len)
6549{
6550 size_t cur;
6551 size_t offset;
6552 struct page *page;
6553 char *kaddr;
6554 char *src = (char *)srcv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006555 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006556
Naohiro Aotad35751562021-02-04 19:21:54 +09006557 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
6558
Qu Wenruof98b6212020-08-19 14:35:47 +08006559 if (check_eb_range(eb, start, len))
6560 return;
Chris Masond1310b22008-01-24 16:13:08 -05006561
Qu Wenruo884b07d2020-12-02 14:48:04 +08006562 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006563
Chris Masond3977122009-01-05 21:25:51 -05006564 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006565 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006566 assert_eb_page_uptodate(eb, page);
Chris Masond1310b22008-01-24 16:13:08 -05006567
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006568 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04006569 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006570 memcpy(kaddr + offset, src, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006571
6572 src += cur;
6573 len -= cur;
6574 offset = 0;
6575 i++;
6576 }
6577}
Chris Masond1310b22008-01-24 16:13:08 -05006578
David Sterba2b489662020-04-29 03:04:10 +02006579void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
David Sterbab159fa22016-11-08 18:09:03 +01006580 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006581{
6582 size_t cur;
6583 size_t offset;
6584 struct page *page;
6585 char *kaddr;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006586 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006587
Qu Wenruof98b6212020-08-19 14:35:47 +08006588 if (check_eb_range(eb, start, len))
6589 return;
Chris Masond1310b22008-01-24 16:13:08 -05006590
Qu Wenruo884b07d2020-12-02 14:48:04 +08006591 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006592
Chris Masond3977122009-01-05 21:25:51 -05006593 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006594 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006595 assert_eb_page_uptodate(eb, page);
Chris Masond1310b22008-01-24 16:13:08 -05006596
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006597 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04006598 kaddr = page_address(page);
David Sterbab159fa22016-11-08 18:09:03 +01006599 memset(kaddr + offset, 0, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006600
6601 len -= cur;
6602 offset = 0;
6603 i++;
6604 }
6605}
Chris Masond1310b22008-01-24 16:13:08 -05006606
David Sterba2b489662020-04-29 03:04:10 +02006607void copy_extent_buffer_full(const struct extent_buffer *dst,
6608 const struct extent_buffer *src)
David Sterba58e80122016-11-08 18:30:31 +01006609{
6610 int i;
David Sterbacc5e31a2018-03-01 18:20:27 +01006611 int num_pages;
David Sterba58e80122016-11-08 18:30:31 +01006612
6613 ASSERT(dst->len == src->len);
6614
Qu Wenruo884b07d2020-12-02 14:48:04 +08006615 if (dst->fs_info->sectorsize == PAGE_SIZE) {
6616 num_pages = num_extent_pages(dst);
6617 for (i = 0; i < num_pages; i++)
6618 copy_page(page_address(dst->pages[i]),
6619 page_address(src->pages[i]));
6620 } else {
6621 size_t src_offset = get_eb_offset_in_page(src, 0);
6622 size_t dst_offset = get_eb_offset_in_page(dst, 0);
6623
6624 ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
6625 memcpy(page_address(dst->pages[0]) + dst_offset,
6626 page_address(src->pages[0]) + src_offset,
6627 src->len);
6628 }
David Sterba58e80122016-11-08 18:30:31 +01006629}
6630
David Sterba2b489662020-04-29 03:04:10 +02006631void copy_extent_buffer(const struct extent_buffer *dst,
6632 const struct extent_buffer *src,
Chris Masond1310b22008-01-24 16:13:08 -05006633 unsigned long dst_offset, unsigned long src_offset,
6634 unsigned long len)
6635{
6636 u64 dst_len = dst->len;
6637 size_t cur;
6638 size_t offset;
6639 struct page *page;
6640 char *kaddr;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006641 unsigned long i = get_eb_page_index(dst_offset);
Chris Masond1310b22008-01-24 16:13:08 -05006642
Qu Wenruof98b6212020-08-19 14:35:47 +08006643 if (check_eb_range(dst, dst_offset, len) ||
6644 check_eb_range(src, src_offset, len))
6645 return;
6646
Chris Masond1310b22008-01-24 16:13:08 -05006647 WARN_ON(src->len != dst_len);
6648
Qu Wenruo884b07d2020-12-02 14:48:04 +08006649 offset = get_eb_offset_in_page(dst, dst_offset);
Chris Masond1310b22008-01-24 16:13:08 -05006650
Chris Masond3977122009-01-05 21:25:51 -05006651 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006652 page = dst->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006653 assert_eb_page_uptodate(dst, page);
Chris Masond1310b22008-01-24 16:13:08 -05006654
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006655 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05006656
Chris Masona6591712011-07-19 12:04:14 -04006657 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006658 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006659
6660 src_offset += cur;
6661 len -= cur;
6662 offset = 0;
6663 i++;
6664 }
6665}
Chris Masond1310b22008-01-24 16:13:08 -05006666
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006667/*
6668 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
6669 * given bit number
6670 * @eb: the extent buffer
6671 * @start: offset of the bitmap item in the extent buffer
6672 * @nr: bit number
6673 * @page_index: return index of the page in the extent buffer that contains the
6674 * given bit number
6675 * @page_offset: return offset into the page given by page_index
6676 *
6677 * This helper hides the ugliness of finding the byte in an extent buffer which
6678 * contains a given bit.
6679 */
David Sterba2b489662020-04-29 03:04:10 +02006680static inline void eb_bitmap_offset(const struct extent_buffer *eb,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006681 unsigned long start, unsigned long nr,
6682 unsigned long *page_index,
6683 size_t *page_offset)
6684{
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006685 size_t byte_offset = BIT_BYTE(nr);
6686 size_t offset;
6687
6688 /*
6689 * The byte we want is the offset of the extent buffer + the offset of
6690 * the bitmap item in the extent buffer + the offset of the byte in the
6691 * bitmap item.
6692 */
Qu Wenruo884b07d2020-12-02 14:48:04 +08006693 offset = start + offset_in_page(eb->start) + byte_offset;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006694
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006695 *page_index = offset >> PAGE_SHIFT;
Johannes Thumshirn70730172018-12-05 15:23:03 +01006696 *page_offset = offset_in_page(offset);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006697}
6698
6699/**
6700 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
6701 * @eb: the extent buffer
6702 * @start: offset of the bitmap item in the extent buffer
6703 * @nr: bit number to test
6704 */
David Sterba2b489662020-04-29 03:04:10 +02006705int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006706 unsigned long nr)
6707{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006708 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006709 struct page *page;
6710 unsigned long i;
6711 size_t offset;
6712
6713 eb_bitmap_offset(eb, start, nr, &i, &offset);
6714 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006715 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006716 kaddr = page_address(page);
6717 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
6718}
6719
6720/**
6721 * extent_buffer_bitmap_set - set an area of a bitmap
6722 * @eb: the extent buffer
6723 * @start: offset of the bitmap item in the extent buffer
6724 * @pos: bit number of the first bit
6725 * @len: number of bits to set
6726 */
David Sterba2b489662020-04-29 03:04:10 +02006727void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006728 unsigned long pos, unsigned long len)
6729{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006730 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006731 struct page *page;
6732 unsigned long i;
6733 size_t offset;
6734 const unsigned int size = pos + len;
6735 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006736 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006737
6738 eb_bitmap_offset(eb, start, pos, &i, &offset);
6739 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006740 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006741 kaddr = page_address(page);
6742
6743 while (len >= bits_to_set) {
6744 kaddr[offset] |= mask_to_set;
6745 len -= bits_to_set;
6746 bits_to_set = BITS_PER_BYTE;
Dan Carpenter9c894692016-10-12 11:33:21 +03006747 mask_to_set = ~0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006748 if (++offset >= PAGE_SIZE && len > 0) {
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006749 offset = 0;
6750 page = eb->pages[++i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006751 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006752 kaddr = page_address(page);
6753 }
6754 }
6755 if (len) {
6756 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
6757 kaddr[offset] |= mask_to_set;
6758 }
6759}
6760
6761
6762/**
6763 * extent_buffer_bitmap_clear - clear an area of a bitmap
6764 * @eb: the extent buffer
6765 * @start: offset of the bitmap item in the extent buffer
6766 * @pos: bit number of the first bit
6767 * @len: number of bits to clear
6768 */
David Sterba2b489662020-04-29 03:04:10 +02006769void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
6770 unsigned long start, unsigned long pos,
6771 unsigned long len)
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006772{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006773 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006774 struct page *page;
6775 unsigned long i;
6776 size_t offset;
6777 const unsigned int size = pos + len;
6778 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006779 u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006780
6781 eb_bitmap_offset(eb, start, pos, &i, &offset);
6782 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006783 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006784 kaddr = page_address(page);
6785
6786 while (len >= bits_to_clear) {
6787 kaddr[offset] &= ~mask_to_clear;
6788 len -= bits_to_clear;
6789 bits_to_clear = BITS_PER_BYTE;
Dan Carpenter9c894692016-10-12 11:33:21 +03006790 mask_to_clear = ~0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006791 if (++offset >= PAGE_SIZE && len > 0) {
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006792 offset = 0;
6793 page = eb->pages[++i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006794 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006795 kaddr = page_address(page);
6796 }
6797 }
6798 if (len) {
6799 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
6800 kaddr[offset] &= ~mask_to_clear;
6801 }
6802}
6803
Sergei Trofimovich33872062011-04-11 21:52:52 +00006804static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
6805{
6806 unsigned long distance = (src > dst) ? src - dst : dst - src;
6807 return distance < len;
6808}
6809
Chris Masond1310b22008-01-24 16:13:08 -05006810static void copy_pages(struct page *dst_page, struct page *src_page,
6811 unsigned long dst_off, unsigned long src_off,
6812 unsigned long len)
6813{
Chris Masona6591712011-07-19 12:04:14 -04006814 char *dst_kaddr = page_address(dst_page);
Chris Masond1310b22008-01-24 16:13:08 -05006815 char *src_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04006816 int must_memmove = 0;
Chris Masond1310b22008-01-24 16:13:08 -05006817
Sergei Trofimovich33872062011-04-11 21:52:52 +00006818 if (dst_page != src_page) {
Chris Masona6591712011-07-19 12:04:14 -04006819 src_kaddr = page_address(src_page);
Sergei Trofimovich33872062011-04-11 21:52:52 +00006820 } else {
Chris Masond1310b22008-01-24 16:13:08 -05006821 src_kaddr = dst_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04006822 if (areas_overlap(src_off, dst_off, len))
6823 must_memmove = 1;
Sergei Trofimovich33872062011-04-11 21:52:52 +00006824 }
Chris Masond1310b22008-01-24 16:13:08 -05006825
Chris Mason727011e2010-08-06 13:21:20 -04006826 if (must_memmove)
6827 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
6828 else
6829 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
Chris Masond1310b22008-01-24 16:13:08 -05006830}
6831
David Sterba2b489662020-04-29 03:04:10 +02006832void memcpy_extent_buffer(const struct extent_buffer *dst,
6833 unsigned long dst_offset, unsigned long src_offset,
6834 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006835{
6836 size_t cur;
6837 size_t dst_off_in_page;
6838 size_t src_off_in_page;
Chris Masond1310b22008-01-24 16:13:08 -05006839 unsigned long dst_i;
6840 unsigned long src_i;
6841
Qu Wenruof98b6212020-08-19 14:35:47 +08006842 if (check_eb_range(dst, dst_offset, len) ||
6843 check_eb_range(dst, src_offset, len))
6844 return;
Chris Masond1310b22008-01-24 16:13:08 -05006845
Chris Masond3977122009-01-05 21:25:51 -05006846 while (len > 0) {
Qu Wenruo884b07d2020-12-02 14:48:04 +08006847 dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
6848 src_off_in_page = get_eb_offset_in_page(dst, src_offset);
Chris Masond1310b22008-01-24 16:13:08 -05006849
Qu Wenruo884b07d2020-12-02 14:48:04 +08006850 dst_i = get_eb_page_index(dst_offset);
6851 src_i = get_eb_page_index(src_offset);
Chris Masond1310b22008-01-24 16:13:08 -05006852
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006853 cur = min(len, (unsigned long)(PAGE_SIZE -
Chris Masond1310b22008-01-24 16:13:08 -05006854 src_off_in_page));
6855 cur = min_t(unsigned long, cur,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006856 (unsigned long)(PAGE_SIZE - dst_off_in_page));
Chris Masond1310b22008-01-24 16:13:08 -05006857
David Sterbafb85fc92014-07-31 01:03:53 +02006858 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05006859 dst_off_in_page, src_off_in_page, cur);
6860
6861 src_offset += cur;
6862 dst_offset += cur;
6863 len -= cur;
6864 }
6865}
Chris Masond1310b22008-01-24 16:13:08 -05006866
David Sterba2b489662020-04-29 03:04:10 +02006867void memmove_extent_buffer(const struct extent_buffer *dst,
6868 unsigned long dst_offset, unsigned long src_offset,
6869 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006870{
6871 size_t cur;
6872 size_t dst_off_in_page;
6873 size_t src_off_in_page;
6874 unsigned long dst_end = dst_offset + len - 1;
6875 unsigned long src_end = src_offset + len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05006876 unsigned long dst_i;
6877 unsigned long src_i;
6878
Qu Wenruof98b6212020-08-19 14:35:47 +08006879 if (check_eb_range(dst, dst_offset, len) ||
6880 check_eb_range(dst, src_offset, len))
6881 return;
Chris Mason727011e2010-08-06 13:21:20 -04006882 if (dst_offset < src_offset) {
Chris Masond1310b22008-01-24 16:13:08 -05006883 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
6884 return;
6885 }
Chris Masond3977122009-01-05 21:25:51 -05006886 while (len > 0) {
Qu Wenruo884b07d2020-12-02 14:48:04 +08006887 dst_i = get_eb_page_index(dst_end);
6888 src_i = get_eb_page_index(src_end);
Chris Masond1310b22008-01-24 16:13:08 -05006889
Qu Wenruo884b07d2020-12-02 14:48:04 +08006890 dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
6891 src_off_in_page = get_eb_offset_in_page(dst, src_end);
Chris Masond1310b22008-01-24 16:13:08 -05006892
6893 cur = min_t(unsigned long, len, src_off_in_page + 1);
6894 cur = min(cur, dst_off_in_page + 1);
David Sterbafb85fc92014-07-31 01:03:53 +02006895 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05006896 dst_off_in_page - cur + 1,
6897 src_off_in_page - cur + 1, cur);
6898
6899 dst_end -= cur;
6900 src_end -= cur;
6901 len -= cur;
6902 }
6903}
Chris Mason6af118ce2008-07-22 11:18:07 -04006904
Qu Wenruod1e86e32021-01-26 16:33:56 +08006905static struct extent_buffer *get_next_extent_buffer(
6906 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
6907{
6908 struct extent_buffer *gang[BTRFS_SUBPAGE_BITMAP_SIZE];
6909 struct extent_buffer *found = NULL;
6910 u64 page_start = page_offset(page);
6911 int ret;
6912 int i;
6913
6914 ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
6915 ASSERT(PAGE_SIZE / fs_info->nodesize <= BTRFS_SUBPAGE_BITMAP_SIZE);
6916 lockdep_assert_held(&fs_info->buffer_lock);
6917
6918 ret = radix_tree_gang_lookup(&fs_info->buffer_radix, (void **)gang,
6919 bytenr >> fs_info->sectorsize_bits,
6920 PAGE_SIZE / fs_info->nodesize);
6921 for (i = 0; i < ret; i++) {
6922 /* Already beyond page end */
6923 if (gang[i]->start >= page_start + PAGE_SIZE)
6924 break;
6925 /* Found one */
6926 if (gang[i]->start >= bytenr) {
6927 found = gang[i];
6928 break;
6929 }
6930 }
6931 return found;
6932}
6933
6934static int try_release_subpage_extent_buffer(struct page *page)
6935{
6936 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
6937 u64 cur = page_offset(page);
6938 const u64 end = page_offset(page) + PAGE_SIZE;
6939 int ret;
6940
6941 while (cur < end) {
6942 struct extent_buffer *eb = NULL;
6943
6944 /*
6945 * Unlike try_release_extent_buffer() which uses page->private
6946 * to grab buffer, for subpage case we rely on radix tree, thus
6947 * we need to ensure radix tree consistency.
6948 *
6949 * We also want an atomic snapshot of the radix tree, thus go
6950 * with spinlock rather than RCU.
6951 */
6952 spin_lock(&fs_info->buffer_lock);
6953 eb = get_next_extent_buffer(fs_info, page, cur);
6954 if (!eb) {
6955 /* No more eb in the page range after or at cur */
6956 spin_unlock(&fs_info->buffer_lock);
6957 break;
6958 }
6959 cur = eb->start + eb->len;
6960
6961 /*
6962 * The same as try_release_extent_buffer(), to ensure the eb
6963 * won't disappear out from under us.
6964 */
6965 spin_lock(&eb->refs_lock);
6966 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
6967 spin_unlock(&eb->refs_lock);
6968 spin_unlock(&fs_info->buffer_lock);
6969 break;
6970 }
6971 spin_unlock(&fs_info->buffer_lock);
6972
6973 /*
6974 * If tree ref isn't set then we know the ref on this eb is a
6975 * real ref, so just return, this eb will likely be freed soon
6976 * anyway.
6977 */
6978 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6979 spin_unlock(&eb->refs_lock);
6980 break;
6981 }
6982
6983 /*
6984 * Here we don't care about the return value, we will always
6985 * check the page private at the end. And
6986 * release_extent_buffer() will release the refs_lock.
6987 */
6988 release_extent_buffer(eb);
6989 }
6990 /*
6991 * Finally to check if we have cleared page private, as if we have
6992 * released all ebs in the page, the page private should be cleared now.
6993 */
6994 spin_lock(&page->mapping->private_lock);
6995 if (!PagePrivate(page))
6996 ret = 1;
6997 else
6998 ret = 0;
6999 spin_unlock(&page->mapping->private_lock);
7000 return ret;
7001
7002}
7003
David Sterbaf7a52a42013-04-26 14:56:29 +00007004int try_release_extent_buffer(struct page *page)
Miao Xie19fe0a82010-10-26 20:57:29 -04007005{
Chris Mason6af118ce2008-07-22 11:18:07 -04007006 struct extent_buffer *eb;
Miao Xie897ca6e92010-10-26 20:57:29 -04007007
Qu Wenruod1e86e32021-01-26 16:33:56 +08007008 if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
7009 return try_release_subpage_extent_buffer(page);
7010
Miao Xie19fe0a82010-10-26 20:57:29 -04007011 /*
Qu Wenruod1e86e32021-01-26 16:33:56 +08007012 * We need to make sure nobody is changing page->private, as we rely on
7013 * page->private as the pointer to extent buffer.
Miao Xie19fe0a82010-10-26 20:57:29 -04007014 */
Josef Bacik3083ee22012-03-09 16:01:49 -05007015 spin_lock(&page->mapping->private_lock);
7016 if (!PagePrivate(page)) {
7017 spin_unlock(&page->mapping->private_lock);
7018 return 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04007019 }
7020
Josef Bacik3083ee22012-03-09 16:01:49 -05007021 eb = (struct extent_buffer *)page->private;
7022 BUG_ON(!eb);
Miao Xie19fe0a82010-10-26 20:57:29 -04007023
Josef Bacik0b32f4b2012-03-13 09:38:00 -04007024 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05007025 * This is a little awful but should be ok, we need to make sure that
7026 * the eb doesn't disappear out from under us while we're looking at
7027 * this page.
7028 */
7029 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04007030 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
Josef Bacik3083ee22012-03-09 16:01:49 -05007031 spin_unlock(&eb->refs_lock);
7032 spin_unlock(&page->mapping->private_lock);
7033 return 0;
7034 }
7035 spin_unlock(&page->mapping->private_lock);
7036
Josef Bacik3083ee22012-03-09 16:01:49 -05007037 /*
7038 * If tree ref isn't set then we know the ref on this eb is a real ref,
7039 * so just return, this page will likely be freed soon anyway.
7040 */
7041 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7042 spin_unlock(&eb->refs_lock);
7043 return 0;
7044 }
Josef Bacik3083ee22012-03-09 16:01:49 -05007045
David Sterbaf7a52a42013-04-26 14:56:29 +00007046 return release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04007047}
Josef Bacikbfb484d2020-11-05 10:45:09 -05007048
7049/*
7050 * btrfs_readahead_tree_block - attempt to readahead a child block
7051 * @fs_info: the fs_info
7052 * @bytenr: bytenr to read
Josef Bacik3fbaf252020-11-05 10:45:20 -05007053 * @owner_root: objectid of the root that owns this eb
Josef Bacikbfb484d2020-11-05 10:45:09 -05007054 * @gen: generation for the uptodate check, can be 0
Josef Bacik3fbaf252020-11-05 10:45:20 -05007055 * @level: level for the eb
Josef Bacikbfb484d2020-11-05 10:45:09 -05007056 *
7057 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
7058 * normal uptodate check of the eb, without checking the generation. If we have
7059 * to read the block we will not block on anything.
7060 */
7061void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
Josef Bacik3fbaf252020-11-05 10:45:20 -05007062 u64 bytenr, u64 owner_root, u64 gen, int level)
Josef Bacikbfb484d2020-11-05 10:45:09 -05007063{
7064 struct extent_buffer *eb;
7065 int ret;
7066
Josef Bacik3fbaf252020-11-05 10:45:20 -05007067 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
Josef Bacikbfb484d2020-11-05 10:45:09 -05007068 if (IS_ERR(eb))
7069 return;
7070
7071 if (btrfs_buffer_uptodate(eb, gen, 1)) {
7072 free_extent_buffer(eb);
7073 return;
7074 }
7075
7076 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
7077 if (ret < 0)
7078 free_extent_buffer_stale(eb);
7079 else
7080 free_extent_buffer(eb);
7081}
7082
7083/*
7084 * btrfs_readahead_node_child - readahead a node's child block
7085 * @node: parent node we're reading from
7086 * @slot: slot in the parent node for the child we want to read
7087 *
7088 * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed at
7089 * the slot in the node provided.
7090 */
7091void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
7092{
7093 btrfs_readahead_tree_block(node->fs_info,
7094 btrfs_node_blockptr(node, slot),
Josef Bacik3fbaf252020-11-05 10:45:20 -05007095 btrfs_header_owner(node),
7096 btrfs_node_ptr_generation(node, slot),
7097 btrfs_header_level(node) - 1);
Josef Bacikbfb484d2020-11-05 10:45:09 -05007098}