// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "misc.h"
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add(spinlock_t *lock,
					struct list_head *new,
					struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(lock, flags);
}

static inline void btrfs_leak_debug_del(spinlock_t *lock,
					struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)	\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	u64 isize;

	if (!inode || !is_data_inode(inode))
		return;

	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
#define btrfs_extent_state_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct btrfs_bio_ctrl bio_ctrl;
	/*
	 * Tell writepage not to lock the state bits for this range;
	 * it still does the unlocking.
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use REQ_SYNC */
	unsigned int sync_io:1;
};

static int add_extent_changeset(struct extent_state *state, u32 bits,
				 struct extent_changeset *changeset,
				 int set)
{
	int ret;

	if (!changeset)
		return 0;
	if (set && (state->state & bits) == bits)
		return 0;
	if (!set && (state->state & bits) == 0)
		return 0;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	return ret;
}

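/*
 * Illustrative sketch, not part of the original file: how the changeset
 * accounting above plays out for one extent_state.  The on-stack extent_state
 * and the function name are hypothetical stand-ins; extent_changeset helpers
 * live in extent_io.h.
 */
static int __maybe_unused example_record_cleared_range(
					struct extent_changeset *changeset)
{
	struct extent_state state = {
		.start = 0,
		.end = 4095,
		.state = EXTENT_DELALLOC,
	};

	/*
	 * Recording a clear (set == 0) of EXTENT_DELALLOC over this 4KiB
	 * state adds 4096 to changeset->bytes_changed and the range
	 * [0, 4095] to changeset->range_changed.
	 */
	return add_extent_changeset(&state, EXTENT_DELALLOC, changeset, 0);
}
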
int __must_check submit_one_bio(struct bio *bio, int mirror_num,
				unsigned long bio_flags)
{
	blk_status_t ret = 0;
	struct extent_io_tree *tree = bio->bi_private;

	bio->bi_private = NULL;

	if (is_data_inode(tree->private_data))
		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
					    bio_flags);
	else
		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
						mirror_num, bio_flags);

	return blk_status_to_errno(ret);
}

/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
	struct bio *bio = epd->bio_ctrl.bio;

	if (bio) {
		bio->bi_status = errno_to_blk_status(ret);
		bio_endio(bio);
		epd->bio_ctrl.bio = NULL;
	}
}

/*
 * Submit bio from extent page data via submit_one_bio
 *
 * Return 0 if everything is OK.
 * Return <0 for error.
 */
static int __must_check flush_write_bio(struct extent_page_data *epd)
{
	int ret = 0;
	struct bio *bio = epd->bio_ctrl.bio;

	if (bio) {
		ret = submit_one_bio(bio, 0, 0);
		/*
		 * Clean up of epd->bio is handled by its endio function.
		 * The endio is triggered either by successful bio execution
		 * or by the error handler of the submit bio hook.
		 * So at this point, no matter what happened, we don't need
		 * to clean up epd->bio.
		 */
		epd->bio_ctrl.bio = NULL;
	}
	return ret;
}

int __init extent_state_cache_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;
	return 0;
}

int __init extent_io_init(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_io_bio, bio),
			BIOSET_NEED_BVECS))
		goto free_buffer_cache;

	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_exit(&btrfs_bioset);

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;
	return -ENOMEM;
}

void __cold extent_state_cache_exit(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

void __cold extent_io_exit(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
	bioset_exit(&btrfs_bioset);
}

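/*
 * Illustrative sketch, not part of the original file: one plausible way the
 * init/exit helpers above pair up in module init/exit code (btrfs wires them
 * up from super.c).  The wrapper names below are hypothetical.
 */
static int __maybe_unused example_extent_io_module_init(void)
{
	int ret;

	ret = extent_state_cache_init();
	if (ret)
		return ret;

	ret = extent_io_init();
	if (ret) {
		extent_state_cache_exit();
		return ret;
	}
	return 0;
}

static void __maybe_unused example_extent_io_module_exit(void)
{
	/* Reverse order: extent buffer cache and bioset first, then states. */
	extent_io_exit();
	extent_state_cache_exit();
}
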
/*
 * For the file_extent_tree, we want to hold the inode lock when we look up and
 * update the disk_i_size, but lockdep will complain because for our io_tree we
 * hold the tree lock and then take the inode lock when setting delalloc.
 * These two things are unrelated, so make a separate class for the
 * file_extent_tree so we don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

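/*
 * Illustrative sketch, not part of the original file: minimal lifecycle of a
 * private extent_io_tree.  IO_TREE_SELFTEST is assumed to be one of the owner
 * values from extent-io-tree.h; any owner constant is handled the same way
 * here.  The function name is hypothetical.
 */
static void __maybe_unused example_extent_io_tree_lifecycle(
					struct btrfs_fs_info *fs_info)
{
	struct extent_io_tree tree;

	/* No inode backs this tree, so private_data is NULL. */
	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST, NULL);

	/* ... set and clear bits on byte ranges of the tree ... */

	/* Drop every extent_state still tracked before the tree goes away. */
	extent_io_tree_release(&tree);
}
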
void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might not be appropriate for the slab allocator,
	 * drop the unsupported bits
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/**
 * Search @tree for an entry that contains @offset. Such an entry would have
 * entry->start <= offset && entry->end >= offset.
 *
 * @tree:       the tree to search
 * @offset:     offset that should fall within an entry in @tree
 * @next_ret:   pointer to the first entry whose range ends after @offset
 * @prev_ret:   pointer to the first entry whose range begins before @offset
 * @p_ret:      pointer where the new node should be anchored (used when
 *              inserting an entry in the tree)
 * @parent_ret: points to the entry which would have been the parent of the
 *              entry containing @offset
 *
 * This function returns a pointer to the entry that contains the @offset byte
 * address. If no such entry exists, then NULL is returned and the other
 * pointer arguments to the function are filled, otherwise the found entry is
 * returned and the other pointers are left untouched.
 */
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **next_ret,
				      struct rb_node **prev_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (next_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
		prev = orig_prev;
	}

	if (prev_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *next = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
	if (!ret)
		return next;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

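/*
 * Illustrative sketch, not part of the original file: the lookup pattern the
 * helpers above enable, as used by __clear_extent_bit() and friends further
 * down.  Must be called with tree->lock held; the function name is
 * hypothetical.
 */
static struct extent_state *__maybe_unused example_first_state_after(
				struct extent_io_tree *tree, u64 offset)
{
	struct rb_node *node;

	/*
	 * Returns the state covering @offset or, if none covers it, the
	 * first state beyond it; NULL once we are past the end of the tree.
	 */
	node = tree_search(tree, offset);
	if (!node)
		return NULL;
	return rb_entry(node, struct extent_state, rb_node);
}
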
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in
 * their state field are not merged because the end_io handlers need to be
 * able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, u32 *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			u32 *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start) {
		btrfs_err(tree->fs_info,
			"insert state: end < start %llu %llu", end, start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		btrfs_err(tree->fs_info,
		       "found node %llu %llu on insert of %llu %llu",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_split_delalloc_extent(tree->private_data, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

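/*
 * Illustrative sketch, not part of the original file: a concrete split.  With
 * tree->lock held and 'state' covering [0, 8191], splitting at 4096 leaves
 * 'prealloc' owning [0, 4095] and 'state' shrunk to [4096, 8191], both linked
 * into the tree.  The function name is hypothetical.
 */
static int __maybe_unused example_split_at_4k(struct extent_io_tree *tree,
					      struct extent_state *state)
{
	struct extent_state *prealloc;

	prealloc = alloc_extent_state(GFP_ATOMIC);
	if (!prealloc)
		return -ENOMEM;

	/* On -EEXIST, split_state() has already freed 'prealloc'. */
	return split_state(tree, state, prealloc, 4096);
}
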
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    u32 *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_clear_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
	BUG_ON(ret < 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree->fs_info, err,
	"locking error: extent tree was modified by another thread while locked");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, int wake, int delete,
		       struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;

	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;
}

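/*
 * Illustrative sketch, not part of the original file: the shape of the thin
 * wrappers that callers normally use instead of calling __clear_extent_bit()
 * directly, comparable to the unlock_extent()/clear_extent_bit() helpers
 * declared in extent-io-tree.h.  The name below is hypothetical.
 */
static int __maybe_unused example_unlock_range(struct extent_io_tree *tree,
					       u64 start, u64 end)
{
	/* Clear EXTENT_LOCKED and wake anyone sleeping on the range. */
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0,
				  NULL, GFP_NOFS, NULL);
}
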
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    u32 bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   u32 *bits, struct extent_changeset *changeset)
{
	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_set_delalloc_extent(tree->private_data, state, bits);

	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
	BUG_ON(ret < 0);
	state->state |= bits_to_set;
}

static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_LOCKED | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		   u32 exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask,
		   struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);

	if (exclusive_bits)
		ASSERT(failed_start);
	else
		ASSERT(failed_start == NULL);
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		/*
		 * If this extent already has all the bits we want set, then
		 * skip it, not necessary to split it or do anything with it.
		 */
		if ((state->state & bits) == bits) {
			start = state->end + 1;
			cache_state(state, cached_state);
			goto search_again;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with the
		 * later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}

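/*
 * Illustrative sketch, not part of the original file: how a caller can use
 * the -EEXIST + failed_start contract of set_extent_bit() to lock a range,
 * retrying once the conflicting holder clears EXTENT_LOCKED.  This mirrors
 * the lock_extent_bits() helper defined later in this file; the name below
 * is hypothetical.
 */
static int __maybe_unused example_lock_range(struct extent_io_tree *tree,
					     u64 start, u64 end,
					     struct extent_state **cached)
{
	u64 failed_start;
	int err;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
				     EXTENT_LOCKED, &failed_start, cached,
				     GFP_NOFS, NULL);
		if (err != -EEXIST)
			break;
		/* Someone else holds part of the range; wait and retry. */
		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
		start = failed_start;
	}
	return err;
}
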
/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 *			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
				       clear_bits);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
1257 err = -ENOMEM;
1258 goto out;
1259 }
Filipe David Borba Manana12cfbad2013-11-26 15:41:47 +00001260 err = insert_state(tree, prealloc, start, end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001261 &p, &parent, &bits, NULL);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001262 if (err)
1263 extent_io_tree_panic(tree, err);
Filipe David Borba Mananac42ac0b2013-11-26 15:01:34 +00001264 cache_state(prealloc, cached_state);
1265 prealloc = NULL;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001266 goto out;
1267 }
1268 state = rb_entry(node, struct extent_state, rb_node);
1269hit_next:
1270 last_start = state->start;
1271 last_end = state->end;
1272
1273 /*
1274 * | ---- desired range ---- |
1275 * | state |
1276 *
1277 * Just lock what we found and keep going
1278 */
1279 if (state->start == start && state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001280 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001281 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001282 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001283 if (last_end == (u64)-1)
1284 goto out;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001285 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001286 if (start < end && state && state->start == start &&
1287 !need_resched())
1288 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001289 goto search_again;
1290 }
1291
1292 /*
1293 * | ---- desired range ---- |
1294 * | state |
1295 * or
1296 * | ------------- state -------------- |
1297 *
1298 * We need to split the extent we found, and may flip bits on
1299 * second half.
1300 *
1301 * If the extent we found extends past our
1302 * range, we just split and search again. It'll get split
1303 * again the next time though.
1304 *
1305 * If the extent we found is inside our range, we set the
1306 * desired bit on it.
1307 */
1308 if (state->start < start) {
1309 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001310 if (!prealloc) {
1311 err = -ENOMEM;
1312 goto out;
1313 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001314 err = split_state(tree, state, prealloc, start);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001315 if (err)
1316 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001317 prealloc = NULL;
1318 if (err)
1319 goto out;
1320 if (state->end <= end) {
Qu Wenruod38ed272015-10-12 14:53:37 +08001321 set_state_bits(tree, state, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001322 cache_state(state, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001323 state = clear_state_bit(tree, state, &clear_bits, 0,
1324 NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001325 if (last_end == (u64)-1)
1326 goto out;
1327 start = last_end + 1;
Liu Bod1ac6e42012-05-10 18:10:39 +08001328 if (start < end && state && state->start == start &&
1329 !need_resched())
1330 goto hit_next;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001331 }
1332 goto search_again;
1333 }
1334 /*
1335 * | ---- desired range ---- |
1336 * | state | or | state |
1337 *
1338 * There's a hole, we need to insert something in it and
1339 * ignore the extent we found.
1340 */
1341 if (state->start > start) {
1342 u64 this_end;
1343 if (end < last_start)
1344 this_end = end;
1345 else
1346 this_end = last_start - 1;
1347
1348 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001349 if (!prealloc) {
1350 err = -ENOMEM;
1351 goto out;
1352 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001353
1354 /*
1355 * Avoid freeing 'prealloc' if it can be merged with
1356 * the later extent.
1357 */
1358 err = insert_state(tree, prealloc, start, this_end,
Qu Wenruod38ed272015-10-12 14:53:37 +08001359 NULL, NULL, &bits, NULL);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001360 if (err)
1361 extent_io_tree_panic(tree, err);
Josef Bacike6138872012-09-27 17:07:30 -04001362 cache_state(prealloc, cached_state);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001363 prealloc = NULL;
1364 start = this_end + 1;
1365 goto search_again;
1366 }
1367 /*
1368 * | ---- desired range ---- |
1369 * | state |
1370 * We need to split the extent, and set the bit
1371 * on the first half
1372 */
1373 if (state->start <= end && state->end > end) {
1374 prealloc = alloc_extent_state_atomic(prealloc);
Liu Bo1cf4ffd2011-12-07 20:08:40 -05001375 if (!prealloc) {
1376 err = -ENOMEM;
1377 goto out;
1378 }
Josef Bacik462d6fa2011-09-26 13:56:12 -04001379
1380 err = split_state(tree, state, prealloc, end + 1);
Jeff Mahoneyc2d904e2011-10-03 23:22:32 -04001381 if (err)
1382 extent_io_tree_panic(tree, err);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001383
Qu Wenruod38ed272015-10-12 14:53:37 +08001384 set_state_bits(tree, prealloc, &bits, NULL);
Josef Bacike6138872012-09-27 17:07:30 -04001385 cache_state(prealloc, cached_state);
Qu Wenruofefdc552015-10-12 15:35:38 +08001386 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
Josef Bacik462d6fa2011-09-26 13:56:12 -04001387 prealloc = NULL;
1388 goto out;
1389 }
1390
Josef Bacik462d6fa2011-09-26 13:56:12 -04001391search_again:
1392 if (start > end)
1393 goto out;
1394 spin_unlock(&tree->lock);
David Sterba210aa272016-04-26 23:54:39 +02001395 cond_resched();
Filipe Mananac8fd3de2014-10-13 12:28:39 +01001396 first_iteration = false;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001397 goto again;
Josef Bacik462d6fa2011-09-26 13:56:12 -04001398
1399out:
1400 spin_unlock(&tree->lock);
1401 if (prealloc)
1402 free_extent_state(prealloc);
1403
1404 return err;
1405}
1406
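/*
 * Illustrative sketch, not part of the original file: one way a caller
 * could use convert_extent_bit() to move a range from EXTENT_DELALLOC to
 * EXTENT_DIRTY, the kind of conversion the kernel-doc above mentions.
 * The wrapper name and the choice of bits are assumptions for the example;
 * it relies only on declarations already used in this file.
 */
static int example_convert_delalloc_to_dirty(struct extent_io_tree *tree,
					     u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int ret;

	/* Set EXTENT_DIRTY and clear EXTENT_DELALLOC over [start, end] */
	ret = convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				 EXTENT_DELALLOC, &cached);
	/* Drop the reference taken by cache_state(), if any */
	free_extent_state(cached);
	return ret;
}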
Chris Masond1310b22008-01-24 16:13:08 -05001407/* wrappers around set/clear extent bit */
Qu Wenruod38ed272015-10-12 14:53:37 +08001408int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001409 u32 bits, struct extent_changeset *changeset)
Qu Wenruod38ed272015-10-12 14:53:37 +08001410{
1411 /*
1412 * We don't support EXTENT_LOCKED yet, as the current changeset will
1413 * record any bits changed, so for the EXTENT_LOCKED case it would
1414 * either fail with -EEXIST or the changeset would record the whole
1415 * range.
1416 */
1417 BUG_ON(bits & EXTENT_LOCKED);
1418
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001419 return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1420 changeset);
Qu Wenruod38ed272015-10-12 14:53:37 +08001421}
1422
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001423int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001424 u32 bits)
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001425{
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001426 return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1427 GFP_NOWAIT, NULL);
Nikolay Borisov4ca73652019-03-27 14:24:10 +02001428}
1429
Qu Wenruofefdc552015-10-12 15:35:38 +08001430int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001431 u32 bits, int wake, int delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001432 struct extent_state **cached)
Qu Wenruofefdc552015-10-12 15:35:38 +08001433{
1434 return __clear_extent_bit(tree, start, end, bits, wake, delete,
David Sterbaae0f1622017-10-31 16:37:52 +01001435 cached, GFP_NOFS, NULL);
Qu Wenruofefdc552015-10-12 15:35:38 +08001436}
1437
Qu Wenruofefdc552015-10-12 15:35:38 +08001438int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001439 u32 bits, struct extent_changeset *changeset)
Qu Wenruofefdc552015-10-12 15:35:38 +08001440{
1441 /*
1442 * Don't support EXTENT_LOCKED case, same reason as
1443 * set_record_extent_bits().
1444 */
1445 BUG_ON(bits & EXTENT_LOCKED);
1446
David Sterbaf734c442016-04-26 23:54:39 +02001447 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
Qu Wenruofefdc552015-10-12 15:35:38 +08001448 changeset);
1449}
1450
Chris Masond352ac62008-09-29 15:18:18 -04001451/*
1452 * Either insert or lock the extent state struct between start and end,
1453 * waiting if the range is already locked.
1454 */
Chris Mason1edbb732009-09-02 13:24:36 -04001455int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaff13db42015-12-03 14:30:40 +01001456 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001457{
1458 int err;
1459 u64 failed_start;
David Sterba9ee49a042015-01-14 19:52:13 +01001460
Chris Masond1310b22008-01-24 16:13:08 -05001461 while (1) {
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001462 err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
1463 EXTENT_LOCKED, &failed_start,
1464 cached_state, GFP_NOFS, NULL);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001465 if (err == -EEXIST) {
Chris Masond1310b22008-01-24 16:13:08 -05001466 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1467 start = failed_start;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001468 } else
Chris Masond1310b22008-01-24 16:13:08 -05001469 break;
Chris Masond1310b22008-01-24 16:13:08 -05001470 WARN_ON(start > end);
1471 }
1472 return err;
1473}
Chris Masond1310b22008-01-24 16:13:08 -05001474
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001475int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001476{
1477 int err;
1478 u64 failed_start;
1479
Nikolay Borisov1cab5e72020-11-05 11:08:00 +02001480 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1481 &failed_start, NULL, GFP_NOFS, NULL);
Yan Zheng66435582008-10-30 14:19:50 -04001482 if (err == -EEXIST) {
1483 if (failed_start > start)
1484 clear_extent_bit(tree, start, failed_start - 1,
David Sterbaae0f1622017-10-31 16:37:52 +01001485 EXTENT_LOCKED, 1, 0, NULL);
Josef Bacik25179202008-10-29 14:49:05 -04001486 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001487 }
Josef Bacik25179202008-10-29 14:49:05 -04001488 return 1;
1489}
Josef Bacik25179202008-10-29 14:49:05 -04001490
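/*
 * Illustrative sketch, not part of the original file: the expected pairing
 * of lock_extent_bits() with unlock_extent_cached(), reusing the cached
 * extent_state so the unlock avoids a second tree search (the same pattern
 * find_lock_delalloc_range() uses later in this file).  The function name
 * and range are placeholders.
 */
static void example_lock_unlock_range(struct extent_io_tree *tree,
				      u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent_bits(tree, start, end, &cached);
	/* ... [start, end] is EXTENT_LOCKED here, do the protected work ... */
	unlock_extent_cached(tree, start, end, &cached);
}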
David Sterbabd1fa4f2015-12-03 13:08:59 +01001491void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001492{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001493 unsigned long index = start >> PAGE_SHIFT;
1494 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001495 struct page *page;
1496
1497 while (index <= end_index) {
1498 page = find_get_page(inode->i_mapping, index);
1499 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1500 clear_page_dirty_for_io(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001501 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001502 index++;
1503 }
Chris Mason4adaa612013-03-26 13:07:00 -04001504}
1505
David Sterbaf6311572015-12-03 13:08:59 +01001506void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001507{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001508 unsigned long index = start >> PAGE_SHIFT;
1509 unsigned long end_index = end >> PAGE_SHIFT;
Chris Mason4adaa612013-03-26 13:07:00 -04001510 struct page *page;
1511
1512 while (index <= end_index) {
1513 page = find_get_page(inode->i_mapping, index);
1514 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Mason4adaa612013-03-26 13:07:00 -04001515 __set_page_dirty_nobuffers(page);
Konstantin Khebnikov8d386332015-02-11 15:26:55 -08001516 account_page_redirty(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001517 put_page(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001518 index++;
1519 }
Chris Mason4adaa612013-03-26 13:07:00 -04001520}
1521
Chris Masond352ac62008-09-29 15:18:18 -04001522/* find the first state struct with 'bits' set after 'start', and
1523 * return it. tree->lock must be held. NULL will be returned if
1524 * nothing was found after 'start'
1525 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00001526static struct extent_state *
Qu Wenruof97e27e2020-11-13 20:51:40 +08001527find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
Chris Masond7fc6402008-02-18 12:12:38 -05001528{
1529 struct rb_node *node;
1530 struct extent_state *state;
1531
1532 /*
1533 * this search will find all the extents that end after
1534 * our range starts.
1535 */
1536 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001537 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001538 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001539
Chris Masond3977122009-01-05 21:25:51 -05001540 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001541 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001542 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001543 return state;
Chris Masond3977122009-01-05 21:25:51 -05001544
Chris Masond7fc6402008-02-18 12:12:38 -05001545 node = rb_next(node);
1546 if (!node)
1547 break;
1548 }
1549out:
1550 return NULL;
1551}
Chris Masond7fc6402008-02-18 12:12:38 -05001552
Chris Masond352ac62008-09-29 15:18:18 -04001553/*
Qu Wenruo03509b72020-10-21 14:24:50 +08001554 * Find the first offset in the io tree with one or more @bits set.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001555 *
Qu Wenruo03509b72020-10-21 14:24:50 +08001556 * Note: If there are multiple bits set in @bits, any of them will match.
1557 *
1558 * Return 0 if we find something, and update @start_ret and @end_ret.
1559 * Return 1 if we found nothing.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001560 */
1561int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001562 u64 *start_ret, u64 *end_ret, u32 bits,
Josef Bacike6138872012-09-27 17:07:30 -04001563 struct extent_state **cached_state)
Xiao Guangrong69261c42011-07-14 03:19:45 +00001564{
1565 struct extent_state *state;
1566 int ret = 1;
1567
1568 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001569 if (cached_state && *cached_state) {
1570 state = *cached_state;
Filipe Manana27a35072014-07-06 20:09:59 +01001571 if (state->end == start - 1 && extent_state_in_tree(state)) {
Liu Bo9688e9a2018-08-23 03:14:53 +08001572 while ((state = next_state(state)) != NULL) {
Josef Bacike6138872012-09-27 17:07:30 -04001573 if (state->state & bits)
1574 goto got_it;
Josef Bacike6138872012-09-27 17:07:30 -04001575 }
1576 free_extent_state(*cached_state);
1577 *cached_state = NULL;
1578 goto out;
1579 }
1580 free_extent_state(*cached_state);
1581 *cached_state = NULL;
1582 }
1583
Xiao Guangrong69261c42011-07-14 03:19:45 +00001584 state = find_first_extent_bit_state(tree, start, bits);
Josef Bacike6138872012-09-27 17:07:30 -04001585got_it:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001586 if (state) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +01001587 cache_state_if_flags(state, cached_state, 0);
Xiao Guangrong69261c42011-07-14 03:19:45 +00001588 *start_ret = state->start;
1589 *end_ret = state->end;
1590 ret = 0;
1591 }
Josef Bacike6138872012-09-27 17:07:30 -04001592out:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001593 spin_unlock(&tree->lock);
1594 return ret;
1595}
1596
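/*
 * Illustrative sketch, not part of the original file: walking every range
 * that has EXTENT_DIRTY set by calling find_first_extent_bit() in a loop
 * and advancing past each hit.  The function name and the bit chosen are
 * assumptions for the example.
 */
static void example_walk_dirty_ranges(struct extent_io_tree *tree)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;

	while (find_first_extent_bit(tree, start, &found_start, &found_end,
				     EXTENT_DIRTY, NULL) == 0) {
		/* ... process [found_start, found_end] ... */
		start = found_end + 1;
	}
}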
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001597/**
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02001598 * Find a contiguous area of bits
1599 *
1600 * @tree: io tree to check
1601 * @start: offset to start the search from
1602 * @start_ret: the first offset we found with the bits set
1603 * @end_ret: the final contiguous range of the bits that were set
1604 * @bits: bits to look for
Josef Bacik41a2ee72020-01-17 09:02:21 -05001605 *
1606 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1607 * to set bits appropriately, and then merge them again. During this time it
1608 * will drop the tree->lock, so use this helper if you want to find the actual
1609 * contiguous area for given bits. We will search to the first bit we find, and
1610 * then walk down the tree until we find a non-contiguous area. The area
1611 * returned will be the full contiguous area with the bits set.
1612 */
1613int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001614 u64 *start_ret, u64 *end_ret, u32 bits)
Josef Bacik41a2ee72020-01-17 09:02:21 -05001615{
1616 struct extent_state *state;
1617 int ret = 1;
1618
1619 spin_lock(&tree->lock);
1620 state = find_first_extent_bit_state(tree, start, bits);
1621 if (state) {
1622 *start_ret = state->start;
1623 *end_ret = state->end;
1624 while ((state = next_state(state)) != NULL) {
1625 if (state->start > (*end_ret + 1))
1626 break;
1627 *end_ret = state->end;
1628 }
1629 ret = 0;
1630 }
1631 spin_unlock(&tree->lock);
1632 return ret;
1633}
1634
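/*
 * Illustrative sketch, not part of the original file: unlike
 * find_first_extent_bit(), find_contiguous_extent_bit() walks adjacent
 * states under a single tree->lock hold, so [start, end] comes back as one
 * unbroken run of the requested bits.  Names are placeholders.
 */
static void example_contiguous_dirty_run(struct extent_io_tree *tree,
					 u64 offset)
{
	u64 start;
	u64 end;

	if (find_contiguous_extent_bit(tree, offset, &start, &end,
				       EXTENT_DIRTY) == 0) {
		/* ... [start, end] is one contiguous EXTENT_DIRTY area ... */
	}
}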
1635/**
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02001636 * Find the first range that has @bits not set. This range could start before
1637 * @start.
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001638 *
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02001639 * @tree: the tree to search
1640 * @start: offset at/after which the found extent should start
1641 * @start_ret: records the beginning of the range
1642 * @end_ret: records the end of the range (inclusive)
1643 * @bits: the set of bits which must be unset
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001644 *
1645 * Since an unallocated range is also considered one which doesn't have the bits
1646 * set, it's possible that @end_ret contains -1. This happens when the range
1647 * spans (last_range_end, end of device]. In this case it's up to the caller to
1648 * trim @end_ret to the appropriate size.
1649 */
1650void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
Qu Wenruof97e27e2020-11-13 20:51:40 +08001651 u64 *start_ret, u64 *end_ret, u32 bits)
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001652{
1653 struct extent_state *state;
1654 struct rb_node *node, *prev = NULL, *next;
1655
1656 spin_lock(&tree->lock);
1657
1658 /* Find first extent with bits cleared */
1659 while (1) {
1660 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
Nikolay Borisov5750c372020-01-27 11:59:26 +02001661 if (!node && !next && !prev) {
1662 /*
1663 * Tree is completely empty, send full range and let
1664 * caller deal with it
1665 */
1666 *start_ret = 0;
1667 *end_ret = -1;
1668 goto out;
1669 } else if (!node && !next) {
1670 /*
1671 * We are past the last allocated chunk, set start at
1672 * the end of the last extent.
1673 */
1674 state = rb_entry(prev, struct extent_state, rb_node);
1675 *start_ret = state->end + 1;
1676 *end_ret = -1;
1677 goto out;
1678 } else if (!node) {
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001679 node = next;
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001680 }
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001681 /*
1682 * At this point 'node' either contains 'start' or start is
1683 * before 'node'
1684 */
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001685 state = rb_entry(node, struct extent_state, rb_node);
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001686
1687 if (in_range(start, state->start, state->end - state->start + 1)) {
1688 if (state->state & bits) {
1689 /*
1690 * |--range with bits sets--|
1691 * |
1692 * start
1693 */
1694 start = state->end + 1;
1695 } else {
1696 /*
1697 * 'start' falls within a range that doesn't
1698 * have the bits set, so take its start as
1699 * the beginning of the desired range
1700 *
1701 * |--range with bits cleared----|
1702 * |
1703 * start
1704 */
1705 *start_ret = state->start;
1706 break;
1707 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001708 } else {
Nikolay Borisov1eaebb32019-06-03 13:06:02 +03001709 /*
1710 * |---prev range---|---hole/unset---|---node range---|
1711 * |
1712 * start
1713 *
1714 * or
1715 *
1716 * |---hole/unset--||--first node--|
1717 * 0 |
1718 * start
1719 */
1720 if (prev) {
1721 state = rb_entry(prev, struct extent_state,
1722 rb_node);
1723 *start_ret = state->end + 1;
1724 } else {
1725 *start_ret = 0;
1726 }
Nikolay Borisov45bfcfc2019-03-27 14:24:17 +02001727 break;
1728 }
1729 }
1730
1731 /*
1732 * Find the longest stretch from start until an entry which has the
1733 * bits set
1734 */
1735 while (1) {
1736 state = rb_entry(node, struct extent_state, rb_node);
1737 if (state->end >= start && !(state->state & bits)) {
1738 *end_ret = state->end;
1739 } else {
1740 *end_ret = state->start - 1;
1741 break;
1742 }
1743
1744 node = rb_next(node);
1745 if (!node)
1746 break;
1747 }
1748out:
1749 spin_unlock(&tree->lock);
1750}
1751
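/*
 * Illustrative sketch, not part of the original file: querying the first
 * range after @offset that has no EXTENT_DIRTY bytes.  As the kernel-doc
 * above warns, the returned end may be -1, so the caller clamps it; the
 * clamp value and the bit used here are assumptions for the example.
 */
static void example_first_clear_range(struct extent_io_tree *tree, u64 offset,
				      u64 limit)
{
	u64 hole_start;
	u64 hole_end;

	find_first_clear_extent_bit(tree, offset, &hole_start, &hole_end,
				    EXTENT_DIRTY);
	if (hole_end == (u64)-1)
		hole_end = limit - 1;
	/* ... [hole_start, hole_end] has no EXTENT_DIRTY bytes ... */
}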
Xiao Guangrong69261c42011-07-14 03:19:45 +00001752/*
Chris Masond352ac62008-09-29 15:18:18 -04001753 * find a contiguous range of bytes in the file marked as delalloc, not
1754 * more than 'max_bytes'. start and end are used to return the range,
1755 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001756 * true is returned if we find something, false if nothing was in the tree
Chris Masond352ac62008-09-29 15:18:18 -04001757 */
Josef Bacik083e75e2019-09-23 10:05:20 -04001758bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1759 u64 *end, u64 max_bytes,
1760 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001761{
1762 struct rb_node *node;
1763 struct extent_state *state;
1764 u64 cur_start = *start;
Lu Fengqi3522e902018-11-29 11:33:38 +08001765 bool found = false;
Chris Masond1310b22008-01-24 16:13:08 -05001766 u64 total_bytes = 0;
1767
Chris Masoncad321a2008-12-17 14:51:42 -05001768 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001769
Chris Masond1310b22008-01-24 16:13:08 -05001770 /*
1771 * this search will find all the extents that end after
1772 * our range starts.
1773 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001774 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001775 if (!node) {
Lu Fengqi3522e902018-11-29 11:33:38 +08001776 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001777 goto out;
1778 }
1779
Chris Masond3977122009-01-05 21:25:51 -05001780 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001781 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001782 if (found && (state->start != cur_start ||
1783 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001784 goto out;
1785 }
1786 if (!(state->state & EXTENT_DELALLOC)) {
1787 if (!found)
1788 *end = state->end;
1789 goto out;
1790 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001791 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001792 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001793 *cached_state = state;
Elena Reshetovab7ac31b2017-03-03 10:55:19 +02001794 refcount_inc(&state->refs);
Josef Bacikc2a128d2010-02-02 21:19:11 +00001795 }
Lu Fengqi3522e902018-11-29 11:33:38 +08001796 found = true;
Chris Masond1310b22008-01-24 16:13:08 -05001797 *end = state->end;
1798 cur_start = state->end + 1;
1799 node = rb_next(node);
Chris Masond1310b22008-01-24 16:13:08 -05001800 total_bytes += state->end - state->start + 1;
Josef Bacik7bf811a52013-10-07 22:11:09 -04001801 if (total_bytes >= max_bytes)
Josef Bacik573aeca2013-08-30 14:38:49 -04001802 break;
Josef Bacik573aeca2013-08-30 14:38:49 -04001803 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001804 break;
1805 }
1806out:
Chris Masoncad321a2008-12-17 14:51:42 -05001807 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001808 return found;
1809}
1810
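/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * btrfs_find_delalloc_range().  On success the helper takes a reference on
 * the cached extent_state, so the caller drops it with free_extent_state()
 * (which tolerates NULL, as the code below also relies on).  The wrapper
 * name is an assumption for the example.
 */
static void example_find_delalloc(struct extent_io_tree *tree, u64 offset)
{
	struct extent_state *cached = NULL;
	u64 start = offset;
	u64 end = 0;
	bool found;

	found = btrfs_find_delalloc_range(tree, &start, &end,
					  BTRFS_MAX_EXTENT_SIZE, &cached);
	if (found) {
		/* ... [start, end] is marked EXTENT_DELALLOC ... */
	}
	free_extent_state(cached);
}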
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001811/*
1812 * Process one page for __process_pages_contig().
1813 *
1814 * Return >0 if we hit @page == @locked_page.
1815 * Return 0 if we updated the page status.
1816 * Return -EAGAIN if we need to try again.
1817 * (For the PAGE_LOCK case, when we got a dirty page or a page that no longer belongs to @mapping)
1818 */
Qu Wenruoe38992b2021-05-31 16:50:42 +08001819static int process_one_page(struct btrfs_fs_info *fs_info,
1820 struct address_space *mapping,
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001821 struct page *page, struct page *locked_page,
Qu Wenruoe38992b2021-05-31 16:50:42 +08001822 unsigned long page_ops, u64 start, u64 end)
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001823{
Qu Wenruoe38992b2021-05-31 16:50:42 +08001824 u32 len;
1825
1826 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
1827 len = end + 1 - start;
1828
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001829 if (page_ops & PAGE_SET_ORDERED)
Qu Wenruob945a462021-05-31 16:50:46 +08001830 btrfs_page_clamp_set_ordered(fs_info, page, start, len);
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001831 if (page_ops & PAGE_SET_ERROR)
Qu Wenruoe38992b2021-05-31 16:50:42 +08001832 btrfs_page_clamp_set_error(fs_info, page, start, len);
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001833 if (page_ops & PAGE_START_WRITEBACK) {
Qu Wenruoe38992b2021-05-31 16:50:42 +08001834 btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
1835 btrfs_page_clamp_set_writeback(fs_info, page, start, len);
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001836 }
1837 if (page_ops & PAGE_END_WRITEBACK)
Qu Wenruoe38992b2021-05-31 16:50:42 +08001838 btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
Qu Wenruoa33a8e92021-05-31 16:50:47 +08001839
1840 if (page == locked_page)
1841 return 1;
1842
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001843 if (page_ops & PAGE_LOCK) {
Qu Wenruo1e1de382021-05-31 16:50:44 +08001844 int ret;
1845
1846 ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
1847 if (ret)
1848 return ret;
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001849 if (!PageDirty(page) || page->mapping != mapping) {
Qu Wenruo1e1de382021-05-31 16:50:44 +08001850 btrfs_page_end_writer_lock(fs_info, page, start, len);
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001851 return -EAGAIN;
1852 }
1853 }
1854 if (page_ops & PAGE_UNLOCK)
Qu Wenruo1e1de382021-05-31 16:50:44 +08001855 btrfs_page_end_writer_lock(fs_info, page, start, len);
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001856 return 0;
1857}
1858
Liu Boda2c7002017-02-10 16:41:05 +01001859static int __process_pages_contig(struct address_space *mapping,
1860 struct page *locked_page,
Qu Wenruo98af9ab2021-05-31 16:50:37 +08001861 u64 start, u64 end, unsigned long page_ops,
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001862 u64 *processed_end)
1863{
Qu Wenruoe38992b2021-05-31 16:50:42 +08001864 struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001865 pgoff_t start_index = start >> PAGE_SHIFT;
1866 pgoff_t end_index = end >> PAGE_SHIFT;
1867 pgoff_t index = start_index;
1868 unsigned long nr_pages = end_index - start_index + 1;
1869 unsigned long pages_processed = 0;
1870 struct page *pages[16];
1871 int err = 0;
1872 int i;
1873
1874 if (page_ops & PAGE_LOCK) {
1875 ASSERT(page_ops == PAGE_LOCK);
1876 ASSERT(processed_end && *processed_end == start);
1877 }
1878
1879 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1880 mapping_set_error(mapping, -EIO);
1881
1882 while (nr_pages > 0) {
1883 int found_pages;
1884
1885 found_pages = find_get_pages_contig(mapping, index,
1886 min_t(unsigned long,
1887 nr_pages, ARRAY_SIZE(pages)), pages);
1888 if (found_pages == 0) {
1889 /*
1890 * We can only find nothing at @index when we're going to
1891 * lock these pages.
1892 */
1893 ASSERT(page_ops & PAGE_LOCK);
1894 err = -EAGAIN;
1895 goto out;
1896 }
1897
1898 for (i = 0; i < found_pages; i++) {
1899 int process_ret;
1900
Qu Wenruoe38992b2021-05-31 16:50:42 +08001901 process_ret = process_one_page(fs_info, mapping,
1902 pages[i], locked_page, page_ops,
1903 start, end);
Qu Wenruoed8f13b2021-05-31 16:50:38 +08001904 if (process_ret < 0) {
1905 for (; i < found_pages; i++)
1906 put_page(pages[i]);
1907 err = -EAGAIN;
1908 goto out;
1909 }
1910 put_page(pages[i]);
1911 pages_processed++;
1912 }
1913 nr_pages -= found_pages;
1914 index += found_pages;
1915 cond_resched();
1916 }
1917out:
1918 if (err && processed_end) {
1919 /*
1920 * Update @processed_end. I know this is awful since it has
1921 * two different return value patterns (inclusive vs exclusive).
1922 *
1923 * But the exclusive pattern is necessary if @start is 0, otherwise
1924 * we would underflow and the check against @processed_end wouldn't
1925 * work as expected.
1926 */
1927 if (pages_processed)
1928 *processed_end = min(end,
1929 ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
1930 else
1931 *processed_end = start;
1932 }
1933 return err;
1934}
Liu Boda2c7002017-02-10 16:41:05 +01001935
Jeff Mahoney143bede2012-03-01 14:56:26 +01001936static noinline void __unlock_for_delalloc(struct inode *inode,
1937 struct page *locked_page,
1938 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001939{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001940 unsigned long index = start >> PAGE_SHIFT;
1941 unsigned long end_index = end >> PAGE_SHIFT;
Chris Masonc8b97812008-10-29 14:49:59 -04001942
Liu Bo76c00212017-02-10 16:42:14 +01001943 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001944 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001945 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001946
Qu Wenruo98af9ab2021-05-31 16:50:37 +08001947 __process_pages_contig(inode->i_mapping, locked_page, start, end,
Liu Bo76c00212017-02-10 16:42:14 +01001948 PAGE_UNLOCK, NULL);
Chris Masonc8b97812008-10-29 14:49:59 -04001949}
1950
1951static noinline int lock_delalloc_pages(struct inode *inode,
1952 struct page *locked_page,
1953 u64 delalloc_start,
1954 u64 delalloc_end)
1955{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001956 unsigned long index = delalloc_start >> PAGE_SHIFT;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001957 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
Qu Wenruo98af9ab2021-05-31 16:50:37 +08001958 u64 processed_end = delalloc_start;
Chris Masonc8b97812008-10-29 14:49:59 -04001959 int ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001960
Liu Bo76c00212017-02-10 16:42:14 +01001961 ASSERT(locked_page);
Chris Masonc8b97812008-10-29 14:49:59 -04001962 if (index == locked_page->index && index == end_index)
1963 return 0;
1964
Qu Wenruo98af9ab2021-05-31 16:50:37 +08001965 ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
1966 delalloc_end, PAGE_LOCK, &processed_end);
1967 if (ret == -EAGAIN && processed_end > delalloc_start)
Liu Bo76c00212017-02-10 16:42:14 +01001968 __unlock_for_delalloc(inode, locked_page, delalloc_start,
Qu Wenruo98af9ab2021-05-31 16:50:37 +08001969 processed_end);
Chris Masonc8b97812008-10-29 14:49:59 -04001970 return ret;
1971}
1972
1973/*
Lu Fengqi3522e902018-11-29 11:33:38 +08001974 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1975 * more than @max_bytes. @Start and @end are used to return the range,
Chris Masonc8b97812008-10-29 14:49:59 -04001976 *
Lu Fengqi3522e902018-11-29 11:33:38 +08001977 * Return: true if we find something
1978 * false if nothing was in the tree
Chris Masonc8b97812008-10-29 14:49:59 -04001979 */
Johannes Thumshirnce9f9672018-11-19 10:38:17 +01001980EXPORT_FOR_TESTS
Lu Fengqi3522e902018-11-29 11:33:38 +08001981noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
Josef Bacik294e30f2013-10-09 12:00:56 -04001982 struct page *locked_page, u64 *start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03001983 u64 *end)
Chris Masonc8b97812008-10-29 14:49:59 -04001984{
Goldwyn Rodrigues99780592019-06-21 10:02:54 -05001985 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Nikolay Borisov917aace2018-10-26 14:43:20 +03001986 u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001987 u64 delalloc_start;
1988 u64 delalloc_end;
Lu Fengqi3522e902018-11-29 11:33:38 +08001989 bool found;
Chris Mason9655d292009-09-02 15:22:30 -04001990 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001991 int ret;
1992 int loops = 0;
1993
1994again:
1995 /* step one, find a bunch of delalloc bytes starting at start */
1996 delalloc_start = *start;
1997 delalloc_end = 0;
Josef Bacik083e75e2019-09-23 10:05:20 -04001998 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1999 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04002000 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04002001 *start = delalloc_start;
2002 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00002003 free_extent_state(cached_state);
Lu Fengqi3522e902018-11-29 11:33:38 +08002004 return false;
Chris Masonc8b97812008-10-29 14:49:59 -04002005 }
2006
2007 /*
Chris Mason70b99e62008-10-31 12:46:39 -04002008 * start comes from the offset of locked_page. We have to lock
2009 * pages in order, so we can't process delalloc bytes before
2010 * locked_page
2011 */
Chris Masond3977122009-01-05 21:25:51 -05002012 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04002013 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04002014
2015 /*
Chris Masonc8b97812008-10-29 14:49:59 -04002016 * make sure to limit the number of pages we try to lock down
Chris Masonc8b97812008-10-29 14:49:59 -04002017 */
Josef Bacik7bf811a52013-10-07 22:11:09 -04002018 if (delalloc_end + 1 - delalloc_start > max_bytes)
2019 delalloc_end = delalloc_start + max_bytes - 1;
Chris Masond3977122009-01-05 21:25:51 -05002020
Chris Masonc8b97812008-10-29 14:49:59 -04002021 /* step two, lock all the pages after the page that has start */
2022 ret = lock_delalloc_pages(inode, locked_page,
2023 delalloc_start, delalloc_end);
Nikolay Borisov9bfd61d2018-10-26 14:43:21 +03002024 ASSERT(!ret || ret == -EAGAIN);
Chris Masonc8b97812008-10-29 14:49:59 -04002025 if (ret == -EAGAIN) {
2026 /* some of the pages are gone, let's avoid looping by
2027 * shortening the size of the delalloc range we're searching
2028 */
Chris Mason9655d292009-09-02 15:22:30 -04002029 free_extent_state(cached_state);
Chris Mason7d788742014-05-21 05:49:54 -07002030 cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04002031 if (!loops) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002032 max_bytes = PAGE_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04002033 loops = 1;
2034 goto again;
2035 } else {
Lu Fengqi3522e902018-11-29 11:33:38 +08002036 found = false;
Chris Masonc8b97812008-10-29 14:49:59 -04002037 goto out_failed;
2038 }
2039 }
Chris Masonc8b97812008-10-29 14:49:59 -04002040
2041 /* step three, lock the state bits for the whole range */
David Sterbaff13db42015-12-03 14:30:40 +01002042 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04002043
2044 /* then test to make sure it is all still delalloc */
2045 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04002046 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04002047 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04002048 unlock_extent_cached(tree, delalloc_start, delalloc_end,
David Sterbae43bbe52017-12-12 21:43:52 +01002049 &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04002050 __unlock_for_delalloc(inode, locked_page,
2051 delalloc_start, delalloc_end);
2052 cond_resched();
2053 goto again;
2054 }
Chris Mason9655d292009-09-02 15:22:30 -04002055 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04002056 *start = delalloc_start;
2057 *end = delalloc_end;
2058out_failed:
2059 return found;
2060}
2061
Nikolay Borisovad7ff172020-06-03 08:55:06 +03002062void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
Nikolay Borisov74e91942019-07-17 16:18:16 +03002063 struct page *locked_page,
Qu Wenruof97e27e2020-11-13 20:51:40 +08002064 u32 clear_bits, unsigned long page_ops)
Liu Bo873695b2017-02-02 17:49:22 -08002065{
Nikolay Borisovad7ff172020-06-03 08:55:06 +03002066 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
Liu Bo873695b2017-02-02 17:49:22 -08002067
Nikolay Borisovad7ff172020-06-03 08:55:06 +03002068 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
Qu Wenruo98af9ab2021-05-31 16:50:37 +08002069 start, end, page_ops, NULL);
Liu Bo873695b2017-02-02 17:49:22 -08002070}
2071
Chris Masond352ac62008-09-29 15:18:18 -04002072/*
2073 * count the number of bytes in the tree that have the given bit(s)
2074 * set. This can be fairly slow, except for EXTENT_DIRTY which is
2075 * cached. The total number found is returned.
2076 */
Chris Masond1310b22008-01-24 16:13:08 -05002077u64 count_range_bits(struct extent_io_tree *tree,
2078 u64 *start, u64 search_end, u64 max_bytes,
Qu Wenruof97e27e2020-11-13 20:51:40 +08002079 u32 bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05002080{
2081 struct rb_node *node;
2082 struct extent_state *state;
2083 u64 cur_start = *start;
2084 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05002085 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002086 int found = 0;
2087
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05302088 if (WARN_ON(search_end <= cur_start))
Chris Masond1310b22008-01-24 16:13:08 -05002089 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05002090
Chris Masoncad321a2008-12-17 14:51:42 -05002091 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002092 if (cur_start == 0 && bits == EXTENT_DIRTY) {
2093 total_bytes = tree->dirty_bytes;
2094 goto out;
2095 }
2096 /*
2097 * this search will find all the extents that end after
2098 * our range starts.
2099 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002100 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05002101 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05002102 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05002103
Chris Masond3977122009-01-05 21:25:51 -05002104 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05002105 state = rb_entry(node, struct extent_state, rb_node);
2106 if (state->start > search_end)
2107 break;
Chris Masonec29ed52011-02-23 16:23:20 -05002108 if (contig && found && state->start > last + 1)
2109 break;
2110 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05002111 total_bytes += min(search_end, state->end) + 1 -
2112 max(cur_start, state->start);
2113 if (total_bytes >= max_bytes)
2114 break;
2115 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04002116 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05002117 found = 1;
2118 }
Chris Masonec29ed52011-02-23 16:23:20 -05002119 last = state->end;
2120 } else if (contig && found) {
2121 break;
Chris Masond1310b22008-01-24 16:13:08 -05002122 }
2123 node = rb_next(node);
2124 if (!node)
2125 break;
2126 }
2127out:
Chris Masoncad321a2008-12-17 14:51:42 -05002128 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002129 return total_bytes;
2130}
Christoph Hellwigb2950862008-12-02 09:54:17 -05002131
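/*
 * Illustrative sketch, not part of the original file: counting how many
 * bytes in [offset, search_end] carry EXTENT_DELALLOC.  The helper updates
 * the start cursor to the first byte actually found with the bit set.  The
 * wrapper name and the uncapped byte limit ((u64)-1) are assumptions for
 * the example.
 */
static u64 example_count_delalloc_bytes(struct extent_io_tree *tree,
					u64 offset, u64 search_end)
{
	u64 found_start = offset;

	return count_range_bits(tree, &found_start, search_end, (u64)-1,
				EXTENT_DELALLOC, 0);
}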
Chris Masond352ac62008-09-29 15:18:18 -04002132/*
2133 * set the failrec field for a given byte offset in the tree. If there isn't
2134 * an extent_state starting at that offset, -ENOENT is returned and nothing is changed.
2135 */
Josef Bacikb3f167a2019-09-23 10:05:21 -04002136int set_state_failrec(struct extent_io_tree *tree, u64 start,
2137 struct io_failure_record *failrec)
Chris Masond1310b22008-01-24 16:13:08 -05002138{
2139 struct rb_node *node;
2140 struct extent_state *state;
2141 int ret = 0;
2142
Chris Masoncad321a2008-12-17 14:51:42 -05002143 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002144 /*
2145 * this search will find all the extents that end after
2146 * our range starts.
2147 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002148 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002149 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05002150 ret = -ENOENT;
2151 goto out;
2152 }
2153 state = rb_entry(node, struct extent_state, rb_node);
2154 if (state->start != start) {
2155 ret = -ENOENT;
2156 goto out;
2157 }
David Sterba47dc1962016-02-11 13:24:13 +01002158 state->failrec = failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002159out:
Chris Masoncad321a2008-12-17 14:51:42 -05002160 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002161 return ret;
2162}
2163
Nikolay Borisov2279a272020-07-02 15:23:28 +03002164struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
Chris Masond1310b22008-01-24 16:13:08 -05002165{
2166 struct rb_node *node;
2167 struct extent_state *state;
Nikolay Borisov2279a272020-07-02 15:23:28 +03002168 struct io_failure_record *failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002169
Chris Masoncad321a2008-12-17 14:51:42 -05002170 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002171 /*
2172 * this search will find all the extents that end after
2173 * our range starts.
2174 */
Chris Mason80ea96b2008-02-01 14:51:59 -05002175 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04002176 if (!node) {
Nikolay Borisov2279a272020-07-02 15:23:28 +03002177 failrec = ERR_PTR(-ENOENT);
Chris Masond1310b22008-01-24 16:13:08 -05002178 goto out;
2179 }
2180 state = rb_entry(node, struct extent_state, rb_node);
2181 if (state->start != start) {
Nikolay Borisov2279a272020-07-02 15:23:28 +03002182 failrec = ERR_PTR(-ENOENT);
Chris Masond1310b22008-01-24 16:13:08 -05002183 goto out;
2184 }
Nikolay Borisov2279a272020-07-02 15:23:28 +03002185
2186 failrec = state->failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002187out:
Chris Masoncad321a2008-12-17 14:51:42 -05002188 spin_unlock(&tree->lock);
Nikolay Borisov2279a272020-07-02 15:23:28 +03002189 return failrec;
Chris Masond1310b22008-01-24 16:13:08 -05002190}
2191
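/*
 * Illustrative sketch, not part of the original file: looking up the
 * io_failure_record stored at a file offset.  get_state_failrec() reports
 * a miss with ERR_PTR(-ENOENT), so callers check IS_ERR() rather than
 * NULL.  The wrapper name is an assumption for the example.
 */
static struct io_failure_record *example_lookup_failrec(struct extent_io_tree *failure_tree,
							u64 start)
{
	struct io_failure_record *failrec;

	failrec = get_state_failrec(failure_tree, start);
	if (IS_ERR(failrec))
		return NULL;	/* no record stored for this offset */
	return failrec;
}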
2192/*
2193 * searches a range in the state tree for the given bits.
Chris Mason70dec802008-01-29 09:59:12 -05002194 * If 'filled' == 1, this returns 1 only if every extent in the range
Chris Masond1310b22008-01-24 16:13:08 -05002195 * has the bits set. Otherwise, 1 is returned if any bit in the
2196 * range is found set.
2197 */
2198int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
Qu Wenruof97e27e2020-11-13 20:51:40 +08002199 u32 bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05002200{
2201 struct extent_state *state = NULL;
2202 struct rb_node *node;
2203 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002204
Chris Masoncad321a2008-12-17 14:51:42 -05002205 spin_lock(&tree->lock);
Filipe Manana27a35072014-07-06 20:09:59 +01002206 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
Josef Bacikdf98b6e2011-06-20 14:53:48 -04002207 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04002208 node = &cached->rb_node;
2209 else
2210 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05002211 while (node && start <= end) {
2212 state = rb_entry(node, struct extent_state, rb_node);
2213
2214 if (filled && state->start > start) {
2215 bitset = 0;
2216 break;
2217 }
2218
2219 if (state->start > end)
2220 break;
2221
2222 if (state->state & bits) {
2223 bitset = 1;
2224 if (!filled)
2225 break;
2226 } else if (filled) {
2227 bitset = 0;
2228 break;
2229 }
Chris Mason46562ce2009-09-23 20:23:16 -04002230
2231 if (state->end == (u64)-1)
2232 break;
2233
Chris Masond1310b22008-01-24 16:13:08 -05002234 start = state->end + 1;
2235 if (start > end)
2236 break;
2237 node = rb_next(node);
2238 if (!node) {
2239 if (filled)
2240 bitset = 0;
2241 break;
2242 }
2243 }
Chris Masoncad321a2008-12-17 14:51:42 -05002244 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05002245 return bitset;
2246}
Chris Masond1310b22008-01-24 16:13:08 -05002247
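/*
 * Illustrative sketch, not part of the original file: the 'filled'
 * argument of test_range_bit() selects between "every byte has the bits"
 * (1) and "any byte has the bits" (0); this mirrors how
 * check_page_uptodate() below uses it.  The wrapper name is a placeholder.
 */
static int example_range_fully_uptodate(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
}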
2248/*
2249 * helper function to set a given page up to date if all the
2250 * extents in the tree for that page are up to date
2251 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01002252static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05002253{
Miao Xie4eee4fa2012-12-21 09:17:45 +00002254 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002255 u64 end = start + PAGE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04002256 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05002257 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002258}
2259
Josef Bacik7870d082017-05-05 11:57:15 -04002260int free_io_failure(struct extent_io_tree *failure_tree,
2261 struct extent_io_tree *io_tree,
2262 struct io_failure_record *rec)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002263{
2264 int ret;
2265 int err = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002266
David Sterba47dc1962016-02-11 13:24:13 +01002267 set_state_failrec(failure_tree, rec->start, NULL);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002268 ret = clear_extent_bits(failure_tree, rec->start,
2269 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002270 EXTENT_LOCKED | EXTENT_DIRTY);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002271 if (ret)
2272 err = ret;
2273
Josef Bacik7870d082017-05-05 11:57:15 -04002274 ret = clear_extent_bits(io_tree, rec->start,
David Woodhouse53b381b2013-01-29 18:40:14 -05002275 rec->start + rec->len - 1,
David Sterba91166212016-04-26 23:54:39 +02002276 EXTENT_DAMAGED);
David Woodhouse53b381b2013-01-29 18:40:14 -05002277 if (ret && !err)
2278 err = ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002279
2280 kfree(rec);
2281 return err;
2282}
2283
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002284/*
2285 * this bypasses the standard btrfs submit functions deliberately, as
2286 * the standard behavior is to write all copies in a raid setup. here we only
2287 * want to write the one bad copy. so we do the mapping for ourselves and issue
2288 * submit_bio directly.
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002289 * to avoid any synchronization issues, wait for the data after writing, which
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002290 * actually prevents the read that triggered the error from finishing.
2291 * currently, there can be no more than two copies of every data bit. thus,
2292 * exactly one rewrite is required.
2293 */
Josef Bacik6ec656b2017-05-05 11:57:14 -04002294int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2295 u64 length, u64 logical, struct page *page,
2296 unsigned int pg_offset, int mirror_num)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002297{
2298 struct bio *bio;
2299 struct btrfs_device *dev;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002300 u64 map_length = 0;
2301 u64 sector;
2302 struct btrfs_bio *bbio = NULL;
2303 int ret;
2304
Linus Torvalds1751e8a2017-11-27 13:05:09 -08002305 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002306 BUG_ON(!mirror_num);
2307
Naohiro Aotaf7ef5282021-02-04 19:22:16 +09002308 if (btrfs_is_zoned(fs_info))
2309 return btrfs_repair_one_zone(fs_info, logical);
2310
David Sterbac5e4c3d2017-06-12 17:29:41 +02002311 bio = btrfs_io_bio_alloc(1);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002312 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002313 map_length = length;
2314
Filipe Mananab5de8d02016-05-27 22:21:27 +01002315 /*
2316 * Avoid races with device replace and make sure our bbio has devices
2317 * associated to its stripes that don't go away while we are doing the
2318 * read repair operation.
2319 */
2320 btrfs_bio_counter_inc_blocked(fs_info);
Nikolay Borisove4ff5fb2017-07-19 10:48:42 +03002321 if (btrfs_is_parity_mirror(fs_info, logical, length)) {
Liu Boc7253282017-03-29 10:53:58 -07002322 /*
2323 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2324 * to update all raid stripes, but here we just want to correct
2325 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2326 * stripe's dev and sector.
2327 */
2328 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2329 &map_length, &bbio, 0);
2330 if (ret) {
2331 btrfs_bio_counter_dec(fs_info);
2332 bio_put(bio);
2333 return -EIO;
2334 }
2335 ASSERT(bbio->mirror_num == 1);
2336 } else {
2337 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2338 &map_length, &bbio, mirror_num);
2339 if (ret) {
2340 btrfs_bio_counter_dec(fs_info);
2341 bio_put(bio);
2342 return -EIO;
2343 }
2344 BUG_ON(mirror_num != bbio->mirror_num);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002345 }
Liu Boc7253282017-03-29 10:53:58 -07002346
2347 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002348 bio->bi_iter.bi_sector = sector;
Liu Boc7253282017-03-29 10:53:58 -07002349 dev = bbio->stripes[bbio->mirror_num - 1].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08002350 btrfs_put_bbio(bbio);
Anand Jainebbede42017-12-04 12:54:52 +08002351 if (!dev || !dev->bdev ||
2352 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
Filipe Mananab5de8d02016-05-27 22:21:27 +01002353 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002354 bio_put(bio);
2355 return -EIO;
2356 }
Christoph Hellwig74d46992017-08-23 19:10:32 +02002357 bio_set_dev(bio, dev->bdev);
Christoph Hellwig70fd7612016-11-01 07:40:10 -06002358 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
Miao Xieffdd2012014-09-12 18:44:00 +08002359 bio_add_page(bio, page, length, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002360
Mike Christie4e49ea42016-06-05 14:31:41 -05002361 if (btrfsic_submit_bio_wait(bio)) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002362 /* try to remap that extent elsewhere? */
Filipe Mananab5de8d02016-05-27 22:21:27 +01002363 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002364 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002365 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002366 return -EIO;
2367 }
2368
David Sterbab14af3b2015-10-08 10:43:10 +02002369 btrfs_info_rl_in_rcu(fs_info,
2370 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
Josef Bacik6ec656b2017-05-05 11:57:14 -04002371 ino, start,
Miao Xie1203b682014-09-12 18:44:01 +08002372 rcu_str_deref(dev->name), sector);
Filipe Mananab5de8d02016-05-27 22:21:27 +01002373 btrfs_bio_counter_dec(fs_info);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002374 bio_put(bio);
2375 return 0;
2376}
2377
David Sterba2b489662020-04-29 03:04:10 +02002378int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
Josef Bacikea466792012-03-26 21:57:36 -04002379{
David Sterba20a1fbf92019-03-20 11:23:44 +01002380 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacikea466792012-03-26 21:57:36 -04002381 u64 start = eb->start;
David Sterbacc5e31a2018-03-01 18:20:27 +01002382 int i, num_pages = num_extent_pages(eb);
Chris Masond95603b2012-04-12 15:55:15 -04002383 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04002384
David Howellsbc98a422017-07-17 08:45:34 +01002385 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002386 return -EROFS;
2387
Josef Bacikea466792012-03-26 21:57:36 -04002388 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02002389 struct page *p = eb->pages[i];
Miao Xie1203b682014-09-12 18:44:01 +08002390
Josef Bacik6ec656b2017-05-05 11:57:14 -04002391 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
Miao Xie1203b682014-09-12 18:44:01 +08002392 start - page_offset(p), mirror_num);
Josef Bacikea466792012-03-26 21:57:36 -04002393 if (ret)
2394 break;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002395 start += PAGE_SIZE;
Josef Bacikea466792012-03-26 21:57:36 -04002396 }
2397
2398 return ret;
2399}
2400
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002401/*
2402 * each time an IO finishes, we do a fast check in the IO failure tree
2403 * to see if we need to process or clean up an io_failure_record
2404 */
Josef Bacik7870d082017-05-05 11:57:15 -04002405int clean_io_failure(struct btrfs_fs_info *fs_info,
2406 struct extent_io_tree *failure_tree,
2407 struct extent_io_tree *io_tree, u64 start,
2408 struct page *page, u64 ino, unsigned int pg_offset)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002409{
2410 u64 private;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002411 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002412 struct extent_state *state;
2413 int num_copies;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002414 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002415
2416 private = 0;
Josef Bacik7870d082017-05-05 11:57:15 -04002417 ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2418 EXTENT_DIRTY, 0);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002419 if (!ret)
2420 return 0;
2421
Nikolay Borisov2279a272020-07-02 15:23:28 +03002422 failrec = get_state_failrec(failure_tree, start);
2423 if (IS_ERR(failrec))
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002424 return 0;
2425
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002426 BUG_ON(!failrec->this_mirror);
2427
David Howellsbc98a422017-07-17 08:45:34 +01002428 if (sb_rdonly(fs_info->sb))
Ilya Dryomov908960c2013-11-03 19:06:39 +02002429 goto out;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002430
Josef Bacik7870d082017-05-05 11:57:15 -04002431 spin_lock(&io_tree->lock);
2432 state = find_first_extent_bit_state(io_tree,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002433 failrec->start,
2434 EXTENT_LOCKED);
Josef Bacik7870d082017-05-05 11:57:15 -04002435 spin_unlock(&io_tree->lock);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002436
Miao Xie883d0de2013-07-25 19:22:35 +08002437 if (state && state->start <= failrec->start &&
2438 state->end >= failrec->start + failrec->len - 1) {
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002439 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2440 failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002441 if (num_copies > 1) {
Josef Bacik7870d082017-05-05 11:57:15 -04002442 repair_io_failure(fs_info, ino, start, failrec->len,
2443 failrec->logical, page, pg_offset,
2444 failrec->failed_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002445 }
2446 }
2447
2448out:
Josef Bacik7870d082017-05-05 11:57:15 -04002449 free_io_failure(failure_tree, io_tree, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002450
Miao Xie454ff3d2014-09-12 18:43:58 +08002451 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002452}
2453
Miao Xief6124962014-09-12 18:44:04 +08002454/*
2455 * Can be called when
2456 * - holding the extent lock
2457 * - under an ordered extent
2458 * - the inode is being freed
2459 */
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002460void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
Miao Xief6124962014-09-12 18:44:04 +08002461{
Nikolay Borisov7ab79562017-02-20 13:50:57 +02002462 struct extent_io_tree *failure_tree = &inode->io_failure_tree;
Miao Xief6124962014-09-12 18:44:04 +08002463 struct io_failure_record *failrec;
2464 struct extent_state *state, *next;
2465
2466 if (RB_EMPTY_ROOT(&failure_tree->state))
2467 return;
2468
2469 spin_lock(&failure_tree->lock);
2470 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2471 while (state) {
2472 if (state->start > end)
2473 break;
2474
2475 ASSERT(state->end <= end);
2476
2477 next = next_state(state);
2478
David Sterba47dc1962016-02-11 13:24:13 +01002479 failrec = state->failrec;
Miao Xief6124962014-09-12 18:44:04 +08002480 free_extent_state(state);
2481 kfree(failrec);
2482
2483 state = next;
2484 }
2485 spin_unlock(&failure_tree->lock);
2486}
2487
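/*
 * Look up the io_failure_record for @start in the inode's failure tree, or
 * allocate a new single-sector record: resolve the logical bytenr through the
 * extent map (handling compressed extents) and mark the range in both the
 * failure tree and the inode's io tree. Returns the record or an ERR_PTR.
 */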
Nikolay Borisov35263022020-07-02 15:23:29 +03002488static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
Qu Wenruo150e4b02021-05-03 10:08:55 +08002489 u64 start)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002490{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002491 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002492 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002493 struct extent_map *em;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002494 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2495 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2496 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
Qu Wenruo150e4b02021-05-03 10:08:55 +08002497 const u32 sectorsize = fs_info->sectorsize;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002498 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002499 u64 logical;
2500
Nikolay Borisov2279a272020-07-02 15:23:28 +03002501 failrec = get_state_failrec(failure_tree, start);
Nikolay Borisov35263022020-07-02 15:23:29 +03002502 if (!IS_ERR(failrec)) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002503 btrfs_debug(fs_info,
Qu Wenruo12458352021-05-03 10:08:56 +08002504 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
2505 failrec->logical, failrec->start, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002506 /*
2507 * When data exists on disk in more than two copies, add to failrec here
2508 * (e.g. with a list of failed mirrors) so that
2509 * clean_io_failure() can clean up all those errors at once.
2510 */
Nikolay Borisov35263022020-07-02 15:23:29 +03002511
2512 return failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002513 }
Miao Xie2fe63032014-09-12 18:43:59 +08002514
Nikolay Borisov35263022020-07-02 15:23:29 +03002515 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2516 if (!failrec)
2517 return ERR_PTR(-ENOMEM);
Miao Xie2fe63032014-09-12 18:43:59 +08002518
Nikolay Borisov35263022020-07-02 15:23:29 +03002519 failrec->start = start;
Qu Wenruo150e4b02021-05-03 10:08:55 +08002520 failrec->len = sectorsize;
Nikolay Borisov35263022020-07-02 15:23:29 +03002521 failrec->this_mirror = 0;
2522 failrec->bio_flags = 0;
Nikolay Borisov35263022020-07-02 15:23:29 +03002523
2524 read_lock(&em_tree->lock);
2525 em = lookup_extent_mapping(em_tree, start, failrec->len);
2526 if (!em) {
2527 read_unlock(&em_tree->lock);
2528 kfree(failrec);
2529 return ERR_PTR(-EIO);
2530 }
2531
2532 if (em->start > start || em->start + em->len <= start) {
2533 free_extent_map(em);
2534 em = NULL;
2535 }
2536 read_unlock(&em_tree->lock);
2537 if (!em) {
2538 kfree(failrec);
2539 return ERR_PTR(-EIO);
2540 }
2541
2542 logical = start - em->start;
2543 logical = em->block_start + logical;
2544 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2545 logical = em->block_start;
2546 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2547 extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2548 }
2549
2550 btrfs_debug(fs_info,
2551 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2552 logical, start, failrec->len);
2553
2554 failrec->logical = logical;
2555 free_extent_map(em);
2556
2557 /* Set the bits in the private failure tree */
Qu Wenruo150e4b02021-05-03 10:08:55 +08002558 ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
Nikolay Borisov35263022020-07-02 15:23:29 +03002559 EXTENT_LOCKED | EXTENT_DIRTY);
2560 if (ret >= 0) {
2561 ret = set_state_failrec(failure_tree, start, failrec);
2562 /* Set the bits in the inode's tree */
Qu Wenruo150e4b02021-05-03 10:08:55 +08002563 ret = set_extent_bits(tree, start, start + sectorsize - 1,
2564 EXTENT_DAMAGED);
Nikolay Borisov35263022020-07-02 15:23:29 +03002565 } else if (ret < 0) {
2566 kfree(failrec);
2567 return ERR_PTR(ret);
2568 }
2569
2570 return failrec;
Miao Xie2fe63032014-09-12 18:43:59 +08002571}
2572
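/*
 * Decide whether a failed read of a single sector is worth retrying from
 * another mirror. Advances failrec->this_mirror past the failed mirror and
 * returns false if there is only one copy or all mirrors have been tried.
 */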
Qu Wenruo12458352021-05-03 10:08:56 +08002573static bool btrfs_check_repairable(struct inode *inode,
Omar Sandovalce06d3e2020-04-16 14:46:18 -07002574 struct io_failure_record *failrec,
2575 int failed_mirror)
Miao Xie2fe63032014-09-12 18:43:59 +08002576{
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002577 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002578 int num_copies;
2579
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002580 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002581 if (num_copies == 1) {
2582 /*
2583 * we only have a single copy of the data, so don't bother with
2584 * all the retry and error correction code that follows. no
2585 * matter what the error is, it is very likely to persist.
2586 */
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002587 btrfs_debug(fs_info,
2588 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2589 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002590 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002591 }
2592
Qu Wenruo12458352021-05-03 10:08:56 +08002593 /* The failure record should only contain one sector */
2594 ASSERT(failrec->len == fs_info->sectorsize);
2595
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002596 /*
Qu Wenruo12458352021-05-03 10:08:56 +08002597 * There are two premises:
2598 * a) deliver good data to the caller
2599 * b) correct the bad sectors on disk
2600 *
2601 * Since we're only doing repair for one sector, we only need to get
2602 * a good copy of the failed sector and if we succeed, we have setup
2603 * everything for repair_io_failure to do the rest for us.
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002604 */
Qu Wenruo12458352021-05-03 10:08:56 +08002605 failrec->failed_mirror = failed_mirror;
2606 failrec->this_mirror++;
2607 if (failrec->this_mirror == failed_mirror)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002608 failrec->this_mirror++;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002609
Miao Xiefacc8a222013-07-25 19:22:34 +08002610 if (failrec->this_mirror > num_copies) {
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002611 btrfs_debug(fs_info,
2612 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2613 num_copies, failrec->this_mirror, failed_mirror);
Liu Boc3cfb652017-07-13 15:00:50 -07002614 return false;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002615 }
2616
Liu Boc3cfb652017-07-13 15:00:50 -07002617 return true;
Miao Xie2fe63032014-09-12 18:43:59 +08002618}
2619
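/*
 * Submit a repair read for one sector: copy the expected csum (if any) from
 * the failed bio and send a new single-page read to the next untried mirror.
 * On success the repair bio's endio finishes the page, so the caller must not
 * touch it; a negative errno is returned otherwise.
 */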
Qu Wenruo150e4b02021-05-03 10:08:55 +08002620int btrfs_repair_one_sector(struct inode *inode,
2621 struct bio *failed_bio, u32 bio_offset,
2622 struct page *page, unsigned int pgoff,
2623 u64 start, int failed_mirror,
2624 submit_bio_hook_t *submit_bio_hook)
Miao Xie2fe63032014-09-12 18:43:59 +08002625{
2626 struct io_failure_record *failrec;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002627 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie2fe63032014-09-12 18:43:59 +08002628 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04002629 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002630 struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002631 const int icsum = bio_offset >> fs_info->sectorsize_bits;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002632 struct bio *repair_bio;
2633 struct btrfs_io_bio *repair_io_bio;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002634 blk_status_t status;
Miao Xie2fe63032014-09-12 18:43:59 +08002635
Omar Sandoval77d5d682020-04-16 14:46:25 -07002636 btrfs_debug(fs_info,
2637 "repair read error: read error at %llu", start);
Miao Xie2fe63032014-09-12 18:43:59 +08002638
Mike Christie1f7ad752016-06-05 14:31:51 -05002639 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
Miao Xie2fe63032014-09-12 18:43:59 +08002640
Qu Wenruo150e4b02021-05-03 10:08:55 +08002641 failrec = btrfs_get_io_failure_record(inode, start);
Nikolay Borisov35263022020-07-02 15:23:29 +03002642 if (IS_ERR(failrec))
Qu Wenruo150e4b02021-05-03 10:08:55 +08002643 return PTR_ERR(failrec);
Miao Xie2fe63032014-09-12 18:43:59 +08002644
2646 if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
Josef Bacik7870d082017-05-05 11:57:15 -04002647 free_io_failure(failure_tree, tree, failrec);
Qu Wenruo150e4b02021-05-03 10:08:55 +08002648 return -EIO;
Miao Xie2fe63032014-09-12 18:43:59 +08002649 }
2650
Omar Sandoval77d5d682020-04-16 14:46:25 -07002651 repair_bio = btrfs_io_bio_alloc(1);
2652 repair_io_bio = btrfs_io_bio(repair_bio);
2653 repair_bio->bi_opf = REQ_OP_READ;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002654 repair_bio->bi_end_io = failed_bio->bi_end_io;
2655 repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2656 repair_bio->bi_private = failed_bio->bi_private;
Miao Xie2fe63032014-09-12 18:43:59 +08002657
Omar Sandoval77d5d682020-04-16 14:46:25 -07002658 if (failed_io_bio->csum) {
David Sterba223486c2020-07-02 11:27:30 +02002659 const u32 csum_size = fs_info->csum_size;
Omar Sandoval77d5d682020-04-16 14:46:25 -07002660
2661 repair_io_bio->csum = repair_io_bio->csum_inline;
2662 memcpy(repair_io_bio->csum,
2663 failed_io_bio->csum + csum_size * icsum, csum_size);
2664 }
2665
2666 bio_add_page(repair_bio, page, failrec->len, pgoff);
2667 repair_io_bio->logical = failrec->start;
2668 repair_io_bio->iter = repair_bio->bi_iter;
Miao Xie2fe63032014-09-12 18:43:59 +08002669
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002670 btrfs_debug(btrfs_sb(inode->i_sb),
Qu Wenruo12458352021-05-03 10:08:56 +08002671 "repair read error: submitting new read to mirror %d",
2672 failrec->this_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002673
Omar Sandoval77d5d682020-04-16 14:46:25 -07002674 status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2675 failrec->bio_flags);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002676 if (status) {
Josef Bacik7870d082017-05-05 11:57:15 -04002677 free_io_failure(failure_tree, tree, failrec);
Omar Sandoval77d5d682020-04-16 14:46:25 -07002678 bio_put(repair_bio);
Miao Xie6c387ab2014-09-12 18:43:57 +08002679 }
Qu Wenruo150e4b02021-05-03 10:08:55 +08002680 return blk_status_to_errno(status);
2681}
2682
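/*
 * Finish the read of the [start, start + len) range inside @page: update the
 * uptodate/error state and either unlock the page (regular sectorsize) or
 * drop the subpage reader count for that range.
 */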
2683static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
2684{
2685 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2686
2687 ASSERT(page_offset(page) <= start &&
2688 start + len <= page_offset(page) + PAGE_SIZE);
2689
Qu Wenruo150e4b02021-05-03 10:08:55 +08002690 if (uptodate) {
2691 btrfs_page_set_uptodate(fs_info, page, start, len);
2692 } else {
2693 btrfs_page_clear_uptodate(fs_info, page, start, len);
2694 btrfs_page_set_error(fs_info, page, start, len);
2695 }
2696
2697 if (fs_info->sectorsize == PAGE_SIZE)
2698 unlock_page(page);
Qu Wenruo3d078ef2021-06-07 17:02:58 +08002699 else
Qu Wenruo150e4b02021-05-03 10:08:55 +08002700 btrfs_subpage_end_reader(fs_info, page, start, len);
2701}
2702
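/*
 * Handle a failed buffered read: walk every sector in the bvec range and, for
 * each sector flagged in @error_bitmap, try to submit a repair read. Sectors
 * without errors, or whose repair submission fails, are ended here so that
 * the whole range still gets unlocked.
 */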
2703static blk_status_t submit_read_repair(struct inode *inode,
2704 struct bio *failed_bio, u32 bio_offset,
2705 struct page *page, unsigned int pgoff,
2706 u64 start, u64 end, int failed_mirror,
2707 unsigned int error_bitmap,
2708 submit_bio_hook_t *submit_bio_hook)
2709{
2710 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2711 const u32 sectorsize = fs_info->sectorsize;
2712 const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
2713 int error = 0;
2714 int i;
2715
2716 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2717
2718 /* We're here because we had some read errors or csum mismatch */
2719 ASSERT(error_bitmap);
2720
2721 /*
2722 * We only get called on buffered IO, thus page must be mapped and bio
2723 * must not be cloned.
2724 */
2725 ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
2726
2727 /* Iterate through all the sectors in the range */
2728 for (i = 0; i < nr_bits; i++) {
2729 const unsigned int offset = i * sectorsize;
2730 struct extent_state *cached = NULL;
2731 bool uptodate = false;
2732 int ret;
2733
2734 if (!(error_bitmap & (1U << i))) {
2735 /*
2736 * This sector has no error, just end the page read
2737 * and unlock the range.
2738 */
2739 uptodate = true;
2740 goto next;
2741 }
2742
2743 ret = btrfs_repair_one_sector(inode, failed_bio,
2744 bio_offset + offset,
2745 page, pgoff + offset, start + offset,
2746 failed_mirror, submit_bio_hook);
2747 if (!ret) {
2748 /*
2749 * We have submitted the read repair, the page release
2750 * will be handled by the endio function of the
2751 * submitted repair bio.
2752 * Thus we don't need to do anything here.
2753 */
2754 continue;
2755 }
2756 /*
2757 * Repair failed, just record the error but still continue.
2758 * Otherwise the remaining sectors will not be properly unlocked.
2759 */
2760 if (!error)
2761 error = ret;
2762next:
2763 end_page_read(page, uptodate, start + offset, sectorsize);
2764 if (uptodate)
2765 set_extent_uptodate(&BTRFS_I(inode)->io_tree,
2766 start + offset,
2767 start + offset + sectorsize - 1,
2768 &cached, GFP_ATOMIC);
2769 unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
2770 start + offset,
2771 start + offset + sectorsize - 1,
2772 &cached);
2773 }
2774 return errno_to_blk_status(error);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002775}
2776
Chris Masond1310b22008-01-24 16:13:08 -05002777/* lots and lots of room for performance fixes in the end_bio funcs */
2778
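/*
 * Per-page writeback completion helper: tell the ordered extent code that the
 * range [start, end] has finished and flag the page and its mapping on error.
 */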
David Sterbab5227c02015-12-03 13:08:59 +01002779void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
Jeff Mahoney87826df2012-02-15 16:23:57 +01002780{
Qu Wenruo38a39ac72021-04-08 20:32:27 +08002781 struct btrfs_inode *inode;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002782 int uptodate = (err == 0);
Eric Sandeen3e2426b2014-06-12 00:39:58 -05002783 int ret = 0;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002784
Qu Wenruo38a39ac72021-04-08 20:32:27 +08002785 ASSERT(page && page->mapping);
2786 inode = BTRFS_I(page->mapping->host);
2787 btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002788
Jeff Mahoney87826df2012-02-15 16:23:57 +01002789 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002790 ClearPageUptodate(page);
2791 SetPageError(page);
Colin Ian Kingbff5baf2017-05-09 18:14:01 +01002792 ret = err < 0 ? err : -EIO;
Liu Bo5dca6ee2014-05-12 12:47:36 +08002793 mapping_set_error(page->mapping, ret);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002794 }
Jeff Mahoney87826df2012-02-15 16:23:57 +01002795}
2796
Chris Masond1310b22008-01-24 16:13:08 -05002797/*
2798 * after a writepage IO is done, we need to:
2799 * clear the uptodate bits on error
2800 * clear the writeback bits in the extent tree for this IO
2801 * end_page_writeback if the page has no more pending IO
2802 *
2803 * Scheduling is not allowed, so the extent state tree is expected
2804 * to have one and only one object corresponding to this IO.
2805 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002806static void end_bio_extent_writepage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002807{
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002808 int error = blk_status_to_errno(bio->bi_status);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002809 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002810 u64 start;
2811 u64 end;
Ming Lei6dc4f102019-02-15 19:13:19 +08002812 struct bvec_iter_all iter_all;
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09002813 bool first_bvec = true;
Chris Masond1310b22008-01-24 16:13:08 -05002814
David Sterbac09abff2017-07-13 18:10:07 +02002815 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002816 bio_for_each_segment_all(bvec, bio, iter_all) {
Chris Masond1310b22008-01-24 16:13:08 -05002817 struct page *page = bvec->bv_page;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002818 struct inode *inode = page->mapping->host;
2819 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Qu Wenruo321a02d2021-05-31 16:50:40 +08002820 const u32 sectorsize = fs_info->sectorsize;
David Woodhouse902b22f2008-08-20 08:51:49 -04002821
Qu Wenruo321a02d2021-05-31 16:50:40 +08002822 /* Our read/write should always be sector aligned. */
2823 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
2824 btrfs_err(fs_info,
2825 "partial page write in btrfs with offset %u and length %u",
2826 bvec->bv_offset, bvec->bv_len);
2827 else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
2828 btrfs_info(fs_info,
2829 "incomplete page write with offset %u and length %u",
2830 bvec->bv_offset, bvec->bv_len);
Chris Masond1310b22008-01-24 16:13:08 -05002831
Qu Wenruo321a02d2021-05-31 16:50:40 +08002832 start = page_offset(page) + bvec->bv_offset;
2833 end = start + bvec->bv_len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002834
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09002835 if (first_bvec) {
2836 btrfs_record_physical_zoned(inode, start, bio);
2837 first_bvec = false;
2838 }
2839
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002840 end_extent_writepage(page, error, start, end);
Qu Wenruo9047e312021-05-31 16:50:43 +08002841
2842 btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002843 }
Chris Mason2b1f55b2008-09-24 11:48:04 -04002844
Chris Masond1310b22008-01-24 16:13:08 -05002845 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002846}
2847
Qu Wenruo94e8c952020-11-13 20:51:28 +08002848/*
2849 * Record previously processed extent range
2850 *
2851 * This lets endio_readpage_release_extent() handle a full extent range at
2852 * once, reducing the number of extent io operations.
2853 */
2854struct processed_extent {
2855 struct btrfs_inode *inode;
2856 /* Start of the range in @inode */
2857 u64 start;
Nigel Christian2e626e52021-01-24 20:41:41 -05002858 /* End of the range in @inode */
Qu Wenruo94e8c952020-11-13 20:51:28 +08002859 u64 end;
2860 bool uptodate;
2861};
2862
2863/*
2864 * Try to release processed extent range
2865 *
2866 * May not release the extent range right now if the current range is
2867 * contiguous to the processed extent.
2868 *
2869 * Will release the processed extent when @inode differs, @uptodate differs,
2870 * or the range is no longer contiguous to the processed range.
2871 *
2872 * Passing @inode == NULL will force processed extent to be released.
2873 */
2874static void endio_readpage_release_extent(struct processed_extent *processed,
2875 struct btrfs_inode *inode, u64 start, u64 end,
2876 bool uptodate)
Miao Xie883d0de2013-07-25 19:22:35 +08002877{
2878 struct extent_state *cached = NULL;
Qu Wenruo94e8c952020-11-13 20:51:28 +08002879 struct extent_io_tree *tree;
Miao Xie883d0de2013-07-25 19:22:35 +08002880
Qu Wenruo94e8c952020-11-13 20:51:28 +08002881 /* The first extent, initialize @processed */
2882 if (!processed->inode)
2883 goto update;
2884
2885 /*
2886 * Contiguous to the processed extent, just extend the end.
2887 *
2888 * Several things to notice:
2889 *
2890 * - bios can be merged as long as the on-disk bytenr is contiguous
2891 * This means we can have pages belonging to other inodes, thus we need
2892 * to check if the inode still matches.
2893 * - a bvec can contain a range beyond the current page for multi-page bvecs
2894 * Thus we need the processed->end + 1 >= start check
2895 */
2896 if (processed->inode == inode && processed->uptodate == uptodate &&
2897 processed->end + 1 >= start && end >= processed->end) {
2898 processed->end = end;
2899 return;
2900 }
2901
2902 tree = &processed->inode->io_tree;
2903 /*
2904 * Now we don't have range contiguous to the processed range, release
2905 * the processed range now.
2906 */
2907 if (processed->uptodate && tree->track_uptodate)
2908 set_extent_uptodate(tree, processed->start, processed->end,
2909 &cached, GFP_ATOMIC);
2910 unlock_extent_cached_atomic(tree, processed->start, processed->end,
2911 &cached);
2912
2913update:
2914 /* Update processed to current range */
2915 processed->inode = inode;
2916 processed->start = start;
2917 processed->end = end;
2918 processed->uptodate = uptodate;
Miao Xie883d0de2013-07-25 19:22:35 +08002919}
2920
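/*
 * Start a read on @page: a no-op for the regular sectorsize == PAGE_SIZE case
 * (the page lock is enough), otherwise begin subpage reader tracking for the
 * whole page range.
 */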
Qu Wenruo92082d42021-02-02 10:28:36 +08002921static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
2922{
2923 ASSERT(PageLocked(page));
2924 if (fs_info->sectorsize == PAGE_SIZE)
2925 return;
2926
2927 ASSERT(PagePrivate(page));
2928 btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
2929}
2930
Chris Masond1310b22008-01-24 16:13:08 -05002931/*
Qu Wenruod9bb77d2021-03-15 13:39:14 +08002932 * Find the extent buffer for a given bytenr.
2933 *
2934 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
2935 * in endio context.
2936 */
2937static struct extent_buffer *find_extent_buffer_readpage(
2938 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
2939{
2940 struct extent_buffer *eb;
2941
2942 /*
2943 * For regular sectorsize, we can use page->private to grab extent
2944 * buffer
2945 */
2946 if (fs_info->sectorsize == PAGE_SIZE) {
2947 ASSERT(PagePrivate(page) && page->private);
2948 return (struct extent_buffer *)page->private;
2949 }
2950
2951 /* For subpage case, we need to lookup buffer radix tree */
2952 rcu_read_lock();
2953 eb = radix_tree_lookup(&fs_info->buffer_radix,
2954 bytenr >> fs_info->sectorsize_bits);
2955 rcu_read_unlock();
2956 ASSERT(eb);
2957 return eb;
2958}
2959
2960/*
Chris Masond1310b22008-01-24 16:13:08 -05002961 * after a readpage IO is done, we need to:
2962 * clear the uptodate bits on error
2963 * set the uptodate bits if things worked
2964 * set the page up to date if all extents in the tree are uptodate
2965 * clear the lock bit in the extent tree
2966 * unlock the page if there are no other extents locked for it
2967 *
2968 * Scheduling is not allowed, so the extent state tree is expected
2969 * to have one and only one object corresponding to this IO.
2970 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002971static void end_bio_extent_readpage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002972{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002973 struct bio_vec *bvec;
Miao Xiefacc8a222013-07-25 19:22:34 +08002974 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
Josef Bacik7870d082017-05-05 11:57:15 -04002975 struct extent_io_tree *tree, *failure_tree;
Qu Wenruo94e8c952020-11-13 20:51:28 +08002976 struct processed_extent processed = { 0 };
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002977 /*
2978 * The offset to the beginning of a bio, since one bio can never be
2979 * larger than UINT_MAX, u32 here is enough.
2980 */
2981 u32 bio_offset = 0;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002982 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002983 int ret;
Ming Lei6dc4f102019-02-15 19:13:19 +08002984 struct bvec_iter_all iter_all;
Chris Masond1310b22008-01-24 16:13:08 -05002985
David Sterbac09abff2017-07-13 18:10:07 +02002986 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02002987 bio_for_each_segment_all(bvec, bio, iter_all) {
Qu Wenruo150e4b02021-05-03 10:08:55 +08002988 bool uptodate = !bio->bi_status;
Chris Masond1310b22008-01-24 16:13:08 -05002989 struct page *page = bvec->bv_page;
Josef Bacika71754f2013-06-17 17:14:39 -04002990 struct inode *inode = page->mapping->host;
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002991 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002992 const u32 sectorsize = fs_info->sectorsize;
Qu Wenruo150e4b02021-05-03 10:08:55 +08002993 unsigned int error_bitmap = (unsigned int)-1;
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08002994 u64 start;
2995 u64 end;
2996 u32 len;
Arne Jansen507903b2011-04-06 10:02:20 +00002997
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04002998 btrfs_debug(fs_info,
2999 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
David Sterba1201b582020-11-26 15:41:27 +01003000 bio->bi_iter.bi_sector, bio->bi_status,
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04003001 io_bio->mirror_num);
Josef Bacika71754f2013-06-17 17:14:39 -04003002 tree = &BTRFS_I(inode)->io_tree;
Josef Bacik7870d082017-05-05 11:57:15 -04003003 failure_tree = &BTRFS_I(inode)->io_failure_tree;
David Woodhouse902b22f2008-08-20 08:51:49 -04003004
Qu Wenruo8b8bbd42020-10-21 14:24:58 +08003005 /*
3006 * We always issue full-sector reads, but if some block in a
3007 * page fails to read, blk_update_request() will advance
3008 * bv_offset and adjust bv_len to compensate. Print a warning
3009 * for unaligned offsets, and an error if they don't add up to
3010 * a full sector.
3011 */
3012 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
3013 btrfs_err(fs_info,
3014 "partial page read in btrfs with offset %u and length %u",
3015 bvec->bv_offset, bvec->bv_len);
3016 else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
3017 sectorsize))
3018 btrfs_info(fs_info,
3019 "incomplete page read with offset %u and length %u",
3020 bvec->bv_offset, bvec->bv_len);
Chris Masond1310b22008-01-24 16:13:08 -05003021
Qu Wenruo8b8bbd42020-10-21 14:24:58 +08003022 start = page_offset(page) + bvec->bv_offset;
3023 end = start + bvec->bv_len - 1;
Miao Xiefacc8a222013-07-25 19:22:34 +08003024 len = bvec->bv_len;
Chris Masond1310b22008-01-24 16:13:08 -05003025
Chris Mason9be33952013-05-17 18:30:14 -04003026 mirror = io_bio->mirror_num;
Nikolay Borisov78e62c02018-11-22 10:17:49 +02003027 if (likely(uptodate)) {
Qu Wenruo150e4b02021-05-03 10:08:55 +08003028 if (is_data_inode(inode)) {
3029 error_bitmap = btrfs_verify_data_csum(io_bio,
Goldwyn Rodrigues5e295762021-03-03 06:55:37 -06003030 bio_offset, page, start, end);
Qu Wenruo150e4b02021-05-03 10:08:55 +08003031 ret = error_bitmap;
3032 } else {
Nikolay Borisov9a446d62020-09-18 16:34:33 +03003033 ret = btrfs_validate_metadata_buffer(io_bio,
Qu Wenruo8e1dc982020-11-12 16:47:57 +08003034 page, start, end, mirror);
Qu Wenruo150e4b02021-05-03 10:08:55 +08003035 }
Stefan Behrens5ee08442012-08-27 08:30:03 -06003036 if (ret)
Qu Wenruo150e4b02021-05-03 10:08:55 +08003037 uptodate = false;
Stefan Behrens5ee08442012-08-27 08:30:03 -06003038 else
Josef Bacik7870d082017-05-05 11:57:15 -04003039 clean_io_failure(BTRFS_I(inode)->root->fs_info,
3040 failure_tree, tree, start,
3041 page,
3042 btrfs_ino(BTRFS_I(inode)), 0);
Chris Masond1310b22008-01-24 16:13:08 -05003043 }
Josef Bacikea466792012-03-26 21:57:36 -04003044
Miao Xief2a09da2013-07-25 19:22:33 +08003045 if (likely(uptodate))
3046 goto readpage_ok;
3047
Nikolay Borisovbe17b3a2020-09-18 16:34:36 +03003048 if (is_data_inode(inode)) {
Liu Bo9d0d1c82017-03-24 15:04:50 -07003049 /*
Qu Wenruo150e4b02021-05-03 10:08:55 +08003050 * submit_read_repair() will handle all the good
3051 * and bad sectors; we just continue to the next bvec.
Liu Bo9d0d1c82017-03-24 15:04:50 -07003052 */
Qu Wenruo150e4b02021-05-03 10:08:55 +08003053 submit_read_repair(inode, bio, bio_offset, page,
3054 start - page_offset(page), start,
3055 end, mirror, error_bitmap,
3056 btrfs_submit_data_bio);
3057
3058 ASSERT(bio_offset + len > bio_offset);
3059 bio_offset += len;
3060 continue;
Nikolay Borisov78e62c02018-11-22 10:17:49 +02003061 } else {
3062 struct extent_buffer *eb;
3063
Qu Wenruod9bb77d2021-03-15 13:39:14 +08003064 eb = find_extent_buffer_readpage(fs_info, page, start);
Nikolay Borisov78e62c02018-11-22 10:17:49 +02003065 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3066 eb->read_mirror = mirror;
3067 atomic_dec(&eb->io_pages);
3068 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
3069 &eb->bflags))
3070 btree_readahead_hook(eb, -EIO);
Chris Mason7e383262008-04-09 16:28:12 -04003071 }
Miao Xief2a09da2013-07-25 19:22:33 +08003072readpage_ok:
Miao Xie883d0de2013-07-25 19:22:35 +08003073 if (likely(uptodate)) {
Josef Bacika71754f2013-06-17 17:14:39 -04003074 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003075 pgoff_t end_index = i_size >> PAGE_SHIFT;
Josef Bacika71754f2013-06-17 17:14:39 -04003076
Qu Wenruoc28ea612021-03-01 16:44:22 +08003077 /*
3078 * Zero out the remaining part if this range straddles
3079 * i_size.
3080 *
3081 * Here we should only zero the range inside the bvec,
3082 * not touch anything else.
3083 *
3084 * NOTE: i_size is exclusive while end is inclusive.
3085 */
3086 if (page->index == end_index && i_size <= end) {
3087 u32 zero_start = max(offset_in_page(i_size),
Qu Wenruod2dcc8e2021-03-08 17:20:17 +08003088 offset_in_page(start));
Qu Wenruoc28ea612021-03-01 16:44:22 +08003089
3090 zero_user_segment(page, zero_start,
3091 offset_in_page(end) + 1);
3092 }
Chris Mason70dec802008-01-29 09:59:12 -05003093 }
Qu Wenruo7ffd27e2020-12-02 14:47:58 +08003094 ASSERT(bio_offset + len > bio_offset);
3095 bio_offset += len;
Miao Xie883d0de2013-07-25 19:22:35 +08003096
Qu Wenruoe09caaf2020-11-13 20:51:29 +08003097 /* Update page status and unlock */
Qu Wenruo92082d42021-02-02 10:28:36 +08003098 end_page_read(page, uptodate, start, len);
Qu Wenruo94e8c952020-11-13 20:51:28 +08003099 endio_readpage_release_extent(&processed, BTRFS_I(inode),
3100 start, end, uptodate);
Kent Overstreet2c30c712013-11-07 12:20:26 -08003101 }
Qu Wenruo94e8c952020-11-13 20:51:28 +08003102 /* Release the last extent */
3103 endio_readpage_release_extent(&processed, NULL, 0, 0, false);
David Sterbab3a0dd52018-11-22 17:16:49 +01003104 btrfs_io_bio_free_csum(io_bio);
Chris Masond1310b22008-01-24 16:13:08 -05003105 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05003106}
3107
Chris Mason9be33952013-05-17 18:30:14 -04003108/*
David Sterba184f9992017-06-12 17:29:39 +02003109 * Initialize the members up to but not including 'bio'. Use this after
3110 * allocating a new bio with bio_alloc_bioset, which does not zero the bytes
3111 * outside of 'bio' because use of __GFP_ZERO is not supported.
Chris Mason9be33952013-05-17 18:30:14 -04003112 */
David Sterba184f9992017-06-12 17:29:39 +02003113static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
Chris Masond1310b22008-01-24 16:13:08 -05003114{
David Sterba184f9992017-06-12 17:29:39 +02003115 memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
3116}
3117
3118/*
David Sterba6e707bc2017-06-02 17:26:26 +02003119 * The following helpers allocate a bio. As it's backed by a bioset, it'll
3120 * never fail. They return a plain bio, but you can call btrfs_io_bio()
3121 * for the appropriate container_of magic.
Chris Masond1310b22008-01-24 16:13:08 -05003122 */
David Sterbae749af442019-06-18 20:00:16 +02003123struct bio *btrfs_bio_alloc(u64 first_byte)
Chris Masond1310b22008-01-24 16:13:08 -05003124{
3125 struct bio *bio;
3126
Christoph Hellwiga8affc02021-03-11 12:01:37 +01003127 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &btrfs_bioset);
David Sterbac821e7f32017-06-02 18:35:36 +02003128 bio->bi_iter.bi_sector = first_byte >> 9;
David Sterba184f9992017-06-12 17:29:39 +02003129 btrfs_io_bio_init(btrfs_io_bio(bio));
Chris Masond1310b22008-01-24 16:13:08 -05003130 return bio;
3131}
3132
David Sterba8b6c1d52017-06-02 17:48:13 +02003133struct bio *btrfs_bio_clone(struct bio *bio)
Chris Mason9be33952013-05-17 18:30:14 -04003134{
Miao Xie23ea8e52014-09-12 18:43:54 +08003135 struct btrfs_io_bio *btrfs_bio;
3136 struct bio *new;
Chris Mason9be33952013-05-17 18:30:14 -04003137
David Sterba6e707bc2017-06-02 17:26:26 +02003138 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04003139 new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
David Sterba6e707bc2017-06-02 17:26:26 +02003140 btrfs_bio = btrfs_io_bio(new);
David Sterba184f9992017-06-12 17:29:39 +02003141 btrfs_io_bio_init(btrfs_bio);
David Sterba6e707bc2017-06-02 17:26:26 +02003142 btrfs_bio->iter = bio->bi_iter;
Miao Xie23ea8e52014-09-12 18:43:54 +08003143 return new;
3144}
Chris Mason9be33952013-05-17 18:30:14 -04003145
David Sterbac5e4c3d2017-06-12 17:29:41 +02003146struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
Chris Mason9be33952013-05-17 18:30:14 -04003147{
Miao Xiefacc8a222013-07-25 19:22:34 +08003148 struct bio *bio;
3149
David Sterba6e707bc2017-06-02 17:26:26 +02003150 /* Bio allocation backed by a bioset does not fail */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04003151 bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
David Sterba184f9992017-06-12 17:29:39 +02003152 btrfs_io_bio_init(btrfs_io_bio(bio));
Miao Xiefacc8a222013-07-25 19:22:34 +08003153 return bio;
Chris Mason9be33952013-05-17 18:30:14 -04003154}
3155
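/*
 * Clone @orig and trim the clone to the byte range [offset, offset + size),
 * recording the trimmed iterator in the btrfs_io_bio.
 */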
Liu Boe4770942017-05-16 10:57:14 -07003156struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
Liu Bo2f8e9142017-05-15 17:43:31 -07003157{
3158 struct bio *bio;
3159 struct btrfs_io_bio *btrfs_bio;
3160
3161 /* this will never fail when it's backed by a bioset */
Kent Overstreet8ac9f7c2018-05-20 18:25:56 -04003162 bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
Liu Bo2f8e9142017-05-15 17:43:31 -07003163 ASSERT(bio);
3164
3165 btrfs_bio = btrfs_io_bio(bio);
David Sterba184f9992017-06-12 17:29:39 +02003166 btrfs_io_bio_init(btrfs_bio);
Liu Bo2f8e9142017-05-15 17:43:31 -07003167
3168 bio_trim(bio, offset >> 9, size >> 9);
Liu Bo17347ce2017-05-15 15:33:27 -07003169 btrfs_bio->iter = bio->bi_iter;
Liu Bo2f8e9142017-05-15 17:43:31 -07003170 return bio;
3171}
Chris Mason9be33952013-05-17 18:30:14 -04003172
Naohiro Aota953651e2021-02-04 19:21:57 +09003173/**
3174 * Attempt to add a page to bio
3175 *
3176 * @bio_ctrl: the bio being built, along with its flags and length limits
3177 * @page: page to add to the bio
3178 * @disk_bytenr: disk bytenr of the new range, used to check whether we are
3179 * adding a page contiguous to the previous one
3180 * @pg_offset: starting offset in the page
3181 * @size: portion of page that we want to write
3183 * @bio_flags: flags of the current bio to see if we can merge them
3185 *
3186 * Attempt to add a page to bio considering stripe alignment etc.
3187 *
3188 * Return true if the page was successfully added, false otherwise.
3189 */
Qu Wenruo390ed292021-04-14 16:42:15 +08003190static bool btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
3191 struct page *page,
Naohiro Aota953651e2021-02-04 19:21:57 +09003192 u64 disk_bytenr, unsigned int size,
3193 unsigned int pg_offset,
Naohiro Aota953651e2021-02-04 19:21:57 +09003194 unsigned long bio_flags)
3195{
Qu Wenruo390ed292021-04-14 16:42:15 +08003196 struct bio *bio = bio_ctrl->bio;
3197 u32 bio_size = bio->bi_iter.bi_size;
Naohiro Aota953651e2021-02-04 19:21:57 +09003198 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
3199 bool contig;
Naohiro Aotae1326f02021-02-04 19:21:58 +09003200 int ret;
Naohiro Aota953651e2021-02-04 19:21:57 +09003201
Qu Wenruo390ed292021-04-14 16:42:15 +08003202 ASSERT(bio);
3203 /* The limit should be calculated when bio_ctrl->bio is allocated */
3204 ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
3205 if (bio_ctrl->bio_flags != bio_flags)
Naohiro Aota953651e2021-02-04 19:21:57 +09003206 return false;
3207
Qu Wenruo390ed292021-04-14 16:42:15 +08003208 if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
Naohiro Aota953651e2021-02-04 19:21:57 +09003209 contig = bio->bi_iter.bi_sector == sector;
3210 else
3211 contig = bio_end_sector(bio) == sector;
3212 if (!contig)
3213 return false;
3214
Qu Wenruo390ed292021-04-14 16:42:15 +08003215 if (bio_size + size > bio_ctrl->len_to_oe_boundary ||
3216 bio_size + size > bio_ctrl->len_to_stripe_boundary)
Naohiro Aota953651e2021-02-04 19:21:57 +09003217 return false;
3218
Qu Wenruo390ed292021-04-14 16:42:15 +08003219 if (bio_op(bio) == REQ_OP_ZONE_APPEND)
Naohiro Aotae1326f02021-02-04 19:21:58 +09003220 ret = bio_add_zone_append_page(bio, page, size, pg_offset);
Qu Wenruo390ed292021-04-14 16:42:15 +08003221 else
Naohiro Aotae1326f02021-02-04 19:21:58 +09003222 ret = bio_add_page(bio, page, size, pg_offset);
3223
3224 return ret == size;
Naohiro Aota953651e2021-02-04 19:21:57 +09003225}
3226
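/*
 * Work out how large bio_ctrl->bio may grow before it must be submitted:
 * limited by the chunk stripe length and, for zone append writes on zoned
 * filesystems, by the end of the ordered extent. Compressed bios get U32_MAX
 * since they are split later by the compression code.
 */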
Qu Wenruo390ed292021-04-14 16:42:15 +08003227static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
3228 struct btrfs_inode *inode)
3229{
3230 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3231 struct btrfs_io_geometry geom;
3232 struct btrfs_ordered_extent *ordered;
3233 struct extent_map *em;
3234 u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
3235 int ret;
3236
3237 /*
3238 * Pages for compressed extent are never submitted to disk directly,
3239 * thus it has no real boundary, just set them to U32_MAX.
3240 *
3241 * The split happens for real compressed bio, which happens in
3242 * btrfs_submit_compressed_read/write().
3243 */
3244 if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
3245 bio_ctrl->len_to_oe_boundary = U32_MAX;
3246 bio_ctrl->len_to_stripe_boundary = U32_MAX;
3247 return 0;
3248 }
3249 em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
3250 if (IS_ERR(em))
3251 return PTR_ERR(em);
3252 ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
3253 logical, &geom);
3254 free_extent_map(em);
3255 if (ret < 0)
3256 return ret;
3258 if (geom.len > U32_MAX)
3259 bio_ctrl->len_to_stripe_boundary = U32_MAX;
3260 else
3261 bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
3262
3263 if (!btrfs_is_zoned(fs_info) ||
3264 bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
3265 bio_ctrl->len_to_oe_boundary = U32_MAX;
3266 return 0;
3267 }
3268
3269 ASSERT(fs_info->max_zone_append_size > 0);
3270 /* Ordered extent not yet created, so we're good */
3271 ordered = btrfs_lookup_ordered_extent(inode, logical);
3272 if (!ordered) {
3273 bio_ctrl->len_to_oe_boundary = U32_MAX;
3274 return 0;
3275 }
3276
3277 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
3278 ordered->disk_bytenr + ordered->disk_num_bytes - logical);
3279 btrfs_put_ordered_extent(ordered);
3280 return 0;
3281}
3282
David Sterba4b81ba42017-06-06 19:14:26 +02003283/*
3284 * @opf: bio REQ_OP_* and REQ_* flags as one value
David Sterbab8b3d622017-06-12 19:50:41 +02003285 * @wbc: optional writeback control for io accounting
3286 * @page: page to add to the bio
Qu Wenruo0c64c332021-01-06 09:01:40 +08003287 * @disk_bytenr: logical bytenr where the write will be
3288 * @size: portion of page that we want to write to
David Sterbab8b3d622017-06-12 19:50:41 +02003289 * @pg_offset: starting offset in the page
David Sterba5c2b1fd2017-06-06 19:22:55 +02003291 * @bio_ctrl: must be a valid pointer, tracks the bio being built along with
 * its flags and length limits
David Sterbab8b3d622017-06-12 19:50:41 +02003292 * @end_io_func: end_io callback for new bio
3293 * @mirror_num: desired mirror to read/write
3295 * @bio_flags: flags of the current bio to see if we can merge them
 * @force_bio_submit: submit the current bio and allocate a new one even if
 * the new range is contiguous with the previous one
David Sterba4b81ba42017-06-06 19:14:26 +02003296 */
David Sterba0ceb34b2020-02-05 19:09:28 +01003297static int submit_extent_page(unsigned int opf,
Chris Masonda2f0f72015-07-02 13:57:22 -07003298 struct writeback_control *wbc,
Qu Wenruo390ed292021-04-14 16:42:15 +08003299 struct btrfs_bio_ctrl *bio_ctrl,
Qu Wenruo0c64c332021-01-06 09:01:40 +08003300 struct page *page, u64 disk_bytenr,
David Sterba6c5a4e22017-10-04 17:10:34 +02003301 size_t size, unsigned long pg_offset,
Chris Masonf1885912008-04-09 16:28:12 -04003302 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04003303 int mirror_num,
Filipe Manana005efed2015-09-14 09:09:31 +01003304 unsigned long bio_flags,
3305 bool force_bio_submit)
Chris Masond1310b22008-01-24 16:13:08 -05003306{
3307 int ret = 0;
3308 struct bio *bio;
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003309 size_t io_size = min_t(size_t, size, PAGE_SIZE);
Naohiro Aotae1326f02021-02-04 19:21:58 +09003310 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3311 struct extent_io_tree *tree = &inode->io_tree;
3312 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Chris Masond1310b22008-01-24 16:13:08 -05003313
Qu Wenruo390ed292021-04-14 16:42:15 +08003314 ASSERT(bio_ctrl);
David Sterba5c2b1fd2017-06-06 19:22:55 +02003315
Qu Wenruo390ed292021-04-14 16:42:15 +08003316 ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
3317 pg_offset + size <= PAGE_SIZE);
3318 if (bio_ctrl->bio) {
3319 bio = bio_ctrl->bio;
Naohiro Aota953651e2021-02-04 19:21:57 +09003320 if (force_bio_submit ||
Qu Wenruo390ed292021-04-14 16:42:15 +08003321 !btrfs_bio_add_page(bio_ctrl, page, disk_bytenr, io_size,
3322 pg_offset, bio_flags)) {
3323 ret = submit_one_bio(bio, mirror_num, bio_ctrl->bio_flags);
3324 bio_ctrl->bio = NULL;
3325 if (ret < 0)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003326 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003327 } else {
Chris Masonda2f0f72015-07-02 13:57:22 -07003328 if (wbc)
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003329 wbc_account_cgroup_owner(wbc, page, io_size);
Chris Masond1310b22008-01-24 16:13:08 -05003330 return 0;
3331 }
3332 }
Chris Masonc8b97812008-10-29 14:49:59 -04003333
Qu Wenruo0c64c332021-01-06 09:01:40 +08003334 bio = btrfs_bio_alloc(disk_bytenr);
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003335 bio_add_page(bio, page, io_size, pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05003336 bio->bi_end_io = end_io_func;
3337 bio->bi_private = tree;
Jens Axboee6959b92017-06-27 11:51:28 -06003338 bio->bi_write_hint = page->mapping->host->i_write_hint;
David Sterba4b81ba42017-06-06 19:14:26 +02003339 bio->bi_opf = opf;
Chris Masonda2f0f72015-07-02 13:57:22 -07003340 if (wbc) {
David Sterba429aebc2019-11-18 23:27:55 +01003341 struct block_device *bdev;
3342
Naohiro Aotae1326f02021-02-04 19:21:58 +09003343 bdev = fs_info->fs_devices->latest_bdev;
David Sterba429aebc2019-11-18 23:27:55 +01003344 bio_set_dev(bio, bdev);
Chris Masonda2f0f72015-07-02 13:57:22 -07003345 wbc_init_bio(wbc, bio);
Qu Wenruoe940e9a2020-10-21 14:25:01 +08003346 wbc_account_cgroup_owner(wbc, page, io_size);
Chris Masonda2f0f72015-07-02 13:57:22 -07003347 }
Naohiro Aotae1326f02021-02-04 19:21:58 +09003348 if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
Johannes Thumshirne7ff9e62021-05-19 00:40:29 +09003349 struct btrfs_device *device;
Naohiro Aotae1326f02021-02-04 19:21:58 +09003350
Johannes Thumshirne7ff9e62021-05-19 00:40:29 +09003351 device = btrfs_zoned_get_device(fs_info, disk_bytenr, io_size);
3352 if (IS_ERR(device))
3353 return PTR_ERR(device);
Naohiro Aotae1326f02021-02-04 19:21:58 +09003354
Johannes Thumshirne7ff9e62021-05-19 00:40:29 +09003355 btrfs_io_bio(bio)->device = device;
Naohiro Aotae1326f02021-02-04 19:21:58 +09003356 }
Chris Mason70dec802008-01-29 09:59:12 -05003357
Qu Wenruo390ed292021-04-14 16:42:15 +08003358 bio_ctrl->bio = bio;
3359 bio_ctrl->bio_flags = bio_flags;
3360 ret = calc_bio_boundaries(bio_ctrl, inode);
Chris Masond1310b22008-01-24 16:13:08 -05003361
3362 return ret;
3363}
3364
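/*
 * Attach the page private data needed by an extent buffer: for the regular
 * sectorsize case page->private points at the eb itself, for subpage a
 * btrfs_subpage structure is attached (reusing @prealloc when provided).
 */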
Qu Wenruo760f9912021-01-26 16:33:48 +08003365static int attach_extent_buffer_page(struct extent_buffer *eb,
3366 struct page *page,
3367 struct btrfs_subpage *prealloc)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003368{
Qu Wenruo760f9912021-01-26 16:33:48 +08003369 struct btrfs_fs_info *fs_info = eb->fs_info;
3370 int ret = 0;
3371
Qu Wenruo0d01e242020-10-21 14:25:02 +08003372 /*
3373 * If the page is mapped to the btree inode, we should hold the private
3374 * lock to prevent races.
3375 * For cloned or dummy extent buffers, their pages are not mapped and
3376 * will not race with any other ebs.
3377 */
3378 if (page->mapping)
3379 lockdep_assert_held(&page->mapping->private_lock);
3380
Qu Wenruo760f9912021-01-26 16:33:48 +08003381 if (fs_info->sectorsize == PAGE_SIZE) {
3382 if (!PagePrivate(page))
3383 attach_page_private(page, eb);
3384 else
3385 WARN_ON(page->private != (unsigned long)eb);
3386 return 0;
3387 }
3388
3389 /* Already mapped, just free prealloc */
3390 if (PagePrivate(page)) {
3391 btrfs_free_subpage(prealloc);
3392 return 0;
3393 }
3394
3395 if (prealloc)
3396 /* Has preallocated memory for subpage */
3397 attach_page_private(page, prealloc);
Guoqing Jiangd1b89bc2020-06-01 21:47:45 -07003398 else
Qu Wenruo760f9912021-01-26 16:33:48 +08003399 /* Do new allocation to attach subpage */
3400 ret = btrfs_attach_subpage(fs_info, page,
3401 BTRFS_SUBPAGE_METADATA);
3402 return ret;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003403}
3404
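/*
 * Attach (set) or detach (clear, below) the page private data used for data
 * pages: a btrfs_subpage structure when sectorsize < PAGE_SIZE, otherwise
 * just the EXTENT_PAGE_PRIVATE marker.
 */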
Qu Wenruo32443de2021-01-26 16:34:00 +08003405int set_page_extent_mapped(struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05003406{
Qu Wenruo32443de2021-01-26 16:34:00 +08003407 struct btrfs_fs_info *fs_info;
3408
3409 ASSERT(page->mapping);
3410
3411 if (PagePrivate(page))
3412 return 0;
3413
3414 fs_info = btrfs_sb(page->mapping->host->i_sb);
3415
3416 if (fs_info->sectorsize < PAGE_SIZE)
3417 return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
3418
3419 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3420 return 0;
3421}
3422
3423void clear_page_extent_mapped(struct page *page)
3424{
3425 struct btrfs_fs_info *fs_info;
3426
3427 ASSERT(page->mapping);
3428
Guoqing Jiangd1b89bc2020-06-01 21:47:45 -07003429 if (!PagePrivate(page))
Qu Wenruo32443de2021-01-26 16:34:00 +08003430 return;
3431
3432 fs_info = btrfs_sb(page->mapping->host->i_sb);
3433 if (fs_info->sectorsize < PAGE_SIZE)
3434 return btrfs_detach_subpage(fs_info, page);
3435
3436 detach_page_private(page);
Chris Masond1310b22008-01-24 16:13:08 -05003437}
3438
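/*
 * Return the cached extent map if it still covers @start, otherwise drop it
 * and look up (and cache) a fresh mapping with btrfs_get_extent().
 */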
Miao Xie125bac012013-07-25 19:22:37 +08003439static struct extent_map *
3440__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
Nikolay Borisov1a5ee1e2020-09-14 12:37:06 +03003441 u64 start, u64 len, struct extent_map **em_cached)
Miao Xie125bac012013-07-25 19:22:37 +08003442{
3443 struct extent_map *em;
3444
3445 if (em_cached && *em_cached) {
3446 em = *em_cached;
Filipe Mananacbc0e922014-02-25 14:15:12 +00003447 if (extent_map_in_tree(em) && start >= em->start &&
Miao Xie125bac012013-07-25 19:22:37 +08003448 start < extent_map_end(em)) {
Elena Reshetova490b54d2017-03-03 10:55:12 +02003449 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003450 return em;
3451 }
3452
3453 free_extent_map(em);
3454 *em_cached = NULL;
3455 }
3456
Nikolay Borisov1a5ee1e2020-09-14 12:37:06 +03003457 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
Miao Xie125bac012013-07-25 19:22:37 +08003458 if (em_cached && !IS_ERR_OR_NULL(em)) {
3459 BUG_ON(*em_cached);
Elena Reshetova490b54d2017-03-03 10:55:12 +02003460 refcount_inc(&em->refs);
Miao Xie125bac012013-07-25 19:22:37 +08003461 *em_cached = em;
3462 }
3463 return em;
3464}
Chris Masond1310b22008-01-24 16:13:08 -05003465/*
3466 * Basic readpage implementation. Locked extent state structs are inserted
3467 * into the tree and removed when the IO is done (by the end_io
3468 * handlers).
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003469 * XXX JDM: This needs looking at to ensure proper page locking
Liu Bobaf863b2016-07-11 10:39:07 -07003470 * Returns 0 on success, otherwise returns an error.
Chris Masond1310b22008-01-24 16:13:08 -05003471 */
Nikolay Borisov0f208812020-09-14 14:39:16 +03003472int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
Qu Wenruo390ed292021-04-14 16:42:15 +08003473 struct btrfs_bio_ctrl *bio_ctrl,
Nikolay Borisov0f208812020-09-14 14:39:16 +03003474 unsigned int read_flags, u64 *prev_em_start)
Chris Masond1310b22008-01-24 16:13:08 -05003475{
3476 struct inode *inode = page->mapping->host;
Qu Wenruo92082d42021-02-02 10:28:36 +08003477 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xie4eee4fa2012-12-21 09:17:45 +00003478 u64 start = page_offset(page);
David Sterba8eec8292017-06-06 19:50:13 +02003479 const u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003480 u64 cur = start;
3481 u64 extent_offset;
3482 u64 last_byte = i_size_read(inode);
3483 u64 block_start;
3484 u64 cur_end;
Chris Masond1310b22008-01-24 16:13:08 -05003485 struct extent_map *em;
Liu Bobaf863b2016-07-11 10:39:07 -07003486 int ret = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003487 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02003488 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003489 size_t iosize;
3490 size_t blocksize = inode->i_sb->s_blocksize;
Filipe Manana7f042a82016-01-27 19:17:20 +00003491 unsigned long this_bio_flag = 0;
David Sterbaf657a312020-02-05 19:09:42 +01003492 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
David Sterbaae6957e2020-02-05 19:09:30 +01003493
Qu Wenruo32443de2021-01-26 16:34:00 +08003494 ret = set_page_extent_mapped(page);
3495 if (ret < 0) {
3496 unlock_extent(tree, start, end);
Qu Wenruo92082d42021-02-02 10:28:36 +08003497 btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
3498 unlock_page(page);
Qu Wenruo32443de2021-01-26 16:34:00 +08003499 goto out;
3500 }
Chris Masond1310b22008-01-24 16:13:08 -05003501
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003502 if (!PageUptodate(page)) {
3503 if (cleancache_get_page(page) == 0) {
3504 BUG_ON(blocksize != PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08003505 unlock_extent(tree, start, end);
Qu Wenruo92082d42021-02-02 10:28:36 +08003506 unlock_page(page);
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003507 goto out;
3508 }
3509 }
3510
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003511 if (page->index == last_byte >> PAGE_SHIFT) {
Johannes Thumshirn70730172018-12-05 15:23:03 +01003512 size_t zero_offset = offset_in_page(last_byte);
Chris Masonc8b97812008-10-29 14:49:59 -04003513
3514 if (zero_offset) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003515 iosize = PAGE_SIZE - zero_offset;
Ira Weinyd048b9c2021-05-04 18:40:07 -07003516 memzero_page(page, zero_offset, iosize);
Chris Masonc8b97812008-10-29 14:49:59 -04003517 flush_dcache_page(page);
Chris Masonc8b97812008-10-29 14:49:59 -04003518 }
3519 }
Qu Wenruo92082d42021-02-02 10:28:36 +08003520 begin_page_read(fs_info, page);
Chris Masond1310b22008-01-24 16:13:08 -05003521 while (cur <= end) {
Filipe Manana005efed2015-09-14 09:09:31 +01003522 bool force_bio_submit = false;
Qu Wenruo0c64c332021-01-06 09:01:40 +08003523 u64 disk_bytenr;
Josef Bacikc8f2f242013-02-11 11:33:00 -05003524
Chris Masond1310b22008-01-24 16:13:08 -05003525 if (cur >= last_byte) {
Arne Jansen507903b2011-04-06 10:02:20 +00003526 struct extent_state *cached = NULL;
3527
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003528 iosize = PAGE_SIZE - pg_offset;
Ira Weinyd048b9c2021-05-04 18:40:07 -07003529 memzero_page(page, pg_offset, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003530 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003531 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003532 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003533 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003534 cur + iosize - 1, &cached);
Qu Wenruo92082d42021-02-02 10:28:36 +08003535 end_page_read(page, true, cur, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003536 break;
3537 }
Miao Xie125bac012013-07-25 19:22:37 +08003538 em = __get_extent_map(inode, page, pg_offset, cur,
Nikolay Borisov1a5ee1e2020-09-14 12:37:06 +03003539 end - cur + 1, em_cached);
David Sterbac7040052011-04-19 18:00:01 +02003540 if (IS_ERR_OR_NULL(em)) {
Filipe Manana7f042a82016-01-27 19:17:20 +00003541 unlock_extent(tree, cur, end);
Qu Wenruo92082d42021-02-02 10:28:36 +08003542 end_page_read(page, false, cur, end + 1 - cur);
Chris Masond1310b22008-01-24 16:13:08 -05003543 break;
3544 }
Chris Masond1310b22008-01-24 16:13:08 -05003545 extent_offset = cur - em->start;
3546 BUG_ON(extent_map_end(em) <= cur);
3547 BUG_ON(end < cur);
3548
Li Zefan261507a02010-12-17 14:21:50 +08003549 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Mark Fasheh4b384312013-08-06 11:42:50 -07003550 this_bio_flag |= EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08003551 extent_set_compress_type(&this_bio_flag,
3552 em->compress_type);
3553 }
Chris Masonc8b97812008-10-29 14:49:59 -04003554
Chris Masond1310b22008-01-24 16:13:08 -05003555 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3556 cur_end = min(extent_map_end(em) - 1, end);
Qu Wenruofda28322013-02-26 08:10:22 +00003557 iosize = ALIGN(iosize, blocksize);
Goldwyn Rodrigues949b3272020-09-15 10:41:40 -05003558 if (this_bio_flag & EXTENT_BIO_COMPRESSED)
Qu Wenruo0c64c332021-01-06 09:01:40 +08003559 disk_bytenr = em->block_start;
Goldwyn Rodrigues949b3272020-09-15 10:41:40 -05003560 else
Qu Wenruo0c64c332021-01-06 09:01:40 +08003561 disk_bytenr = em->block_start + extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003562 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04003563 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3564 block_start = EXTENT_MAP_HOLE;
Filipe Manana005efed2015-09-14 09:09:31 +01003565
3566 /*
3567 * If we have a file range that points to a compressed extent
Randy Dunlap260db432020-08-04 19:48:34 -07003568 * and it's followed by a consecutive file range that points
Filipe Manana005efed2015-09-14 09:09:31 +01003569 * to the same compressed extent (possibly with a different
3570 * offset and/or length, so it either points to the whole extent
3571 * or only part of it), we must make sure we do not submit a
3572 * single bio to populate the pages for the 2 ranges because
3573 * this makes the compressed extent read zero out the pages
3574 * belonging to the 2nd range. Imagine the following scenario:
3575 *
3576 * File layout
3577 * [0 - 8K] [8K - 24K]
3578 * | |
3579 * | |
3580 * points to extent X, points to extent X,
3581 * offset 4K, length of 8K offset 0, length 16K
3582 *
3583 * [extent X, compressed length = 4K uncompressed length = 16K]
3584 *
3585 * If the bio to read the compressed extent covers both ranges,
3586 * it will decompress extent X into the pages belonging to the
3587 * first range and then it will stop, zeroing out the remaining
3588 * pages that belong to the other range that points to extent X.
3589 * So here we make sure we submit 2 bios, one for the first
3590 * range and another one for the second range. Both will target
3591 * the same physical extent from disk, but we can't currently
3592 * make the compressed bio endio callback populate the pages
3593 * for both ranges because each compressed bio is tightly
3594 * coupled with a single extent map, and each range can have
3595 * an extent map with a different offset value relative to the
3596 * uncompressed data of our extent and different lengths. This
3597 * is a corner case so we prioritize correctness over
3598 * non-optimal behavior (submitting 2 bios for the same extent).
3599 */
3600 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3601 prev_em_start && *prev_em_start != (u64)-1 &&
Filipe Manana8e928212019-02-14 15:17:20 +00003602 *prev_em_start != em->start)
Filipe Manana005efed2015-09-14 09:09:31 +01003603 force_bio_submit = true;
3604
3605 if (prev_em_start)
Filipe Manana8e928212019-02-14 15:17:20 +00003606 *prev_em_start = em->start;
Filipe Manana005efed2015-09-14 09:09:31 +01003607
Chris Masond1310b22008-01-24 16:13:08 -05003608 free_extent_map(em);
3609 em = NULL;
3610
3611 /* we've found a hole, just zero and go on */
3612 if (block_start == EXTENT_MAP_HOLE) {
Arne Jansen507903b2011-04-06 10:02:20 +00003613 struct extent_state *cached = NULL;
3614
Ira Weinyd048b9c2021-05-04 18:40:07 -07003615 memzero_page(page, pg_offset, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003616 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003617
3618 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003619 &cached, GFP_NOFS);
Filipe Manana7f042a82016-01-27 19:17:20 +00003620 unlock_extent_cached(tree, cur,
David Sterbae43bbe52017-12-12 21:43:52 +01003621 cur + iosize - 1, &cached);
Qu Wenruo92082d42021-02-02 10:28:36 +08003622 end_page_read(page, true, cur, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003623 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003624 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003625 continue;
3626 }
3627 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04003628 if (test_range_bit(tree, cur, cur_end,
3629 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04003630 check_page_uptodate(tree, page);
Filipe Manana7f042a82016-01-27 19:17:20 +00003631 unlock_extent(tree, cur, cur + iosize - 1);
Qu Wenruo92082d42021-02-02 10:28:36 +08003632 end_page_read(page, true, cur, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003633 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003634 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003635 continue;
3636 }
Chris Mason70dec802008-01-29 09:59:12 -05003637 /* we have an inline extent but it didn't get marked
3638 * uptodate. Error out.
3639 */
3640 if (block_start == EXTENT_MAP_INLINE) {
Filipe Manana7f042a82016-01-27 19:17:20 +00003641 unlock_extent(tree, cur, cur + iosize - 1);
Qu Wenruo92082d42021-02-02 10:28:36 +08003642 end_page_read(page, false, cur, iosize);
Chris Mason70dec802008-01-29 09:59:12 -05003643 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003644 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05003645 continue;
3646 }
Chris Masond1310b22008-01-24 16:13:08 -05003647
David Sterba0ceb34b2020-02-05 19:09:28 +01003648 ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
Qu Wenruo390ed292021-04-14 16:42:15 +08003649 bio_ctrl, page, disk_bytenr, iosize,
3650 pg_offset,
Nikolay Borisovfd513002020-09-14 12:37:11 +03003651 end_bio_extent_readpage, 0,
Filipe Manana005efed2015-09-14 09:09:31 +01003652 this_bio_flag,
3653 force_bio_submit);
Josef Bacikc8f2f242013-02-11 11:33:00 -05003654 if (!ret) {
3655 nr++;
Josef Bacikc8f2f242013-02-11 11:33:00 -05003656 } else {
Filipe Manana7f042a82016-01-27 19:17:20 +00003657 unlock_extent(tree, cur, cur + iosize - 1);
Qu Wenruo92082d42021-02-02 10:28:36 +08003658 end_page_read(page, false, cur, iosize);
Liu Bobaf863b2016-07-11 10:39:07 -07003659 goto out;
Josef Bacikedd33c92012-10-05 16:40:32 -04003660 }
Chris Masond1310b22008-01-24 16:13:08 -05003661 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003662 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003663 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003664out:
Liu Bobaf863b2016-07-11 10:39:07 -07003665 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003666}
3667
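/*
 * Readahead helper: lock and flush any ordered extents covering [start, end],
 * then read each page in the batch via btrfs_do_readpage() with REQ_RAHEAD,
 * dropping the reference held on each page afterwards.
 */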
David Sterbab6660e82020-02-05 19:09:40 +01003668static inline void contiguous_readpages(struct page *pages[], int nr_pages,
Qu Wenruo390ed292021-04-14 16:42:15 +08003669 u64 start, u64 end,
3670 struct extent_map **em_cached,
3671 struct btrfs_bio_ctrl *bio_ctrl,
3672 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003673{
Nikolay Borisov23d31bd2019-05-07 10:19:23 +03003674 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
Miao Xie99740902013-07-25 19:22:36 +08003675 int index;
3676
David Sterbab272ae22020-02-05 19:09:33 +01003677 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003678
3679 for (index = 0; index < nr_pages; index++) {
Qu Wenruo390ed292021-04-14 16:42:15 +08003680 btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
Nikolay Borisov0f208812020-09-14 14:39:16 +03003681 REQ_RAHEAD, prev_em_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003682 put_page(pages[index]);
Miao Xie99740902013-07-25 19:22:36 +08003683 }
3684}
3685
David Sterba3d4b9492017-02-10 19:33:41 +01003686static void update_nr_written(struct writeback_control *wbc,
Liu Boa91326672016-03-07 16:56:21 -08003687 unsigned long nr_written)
Chris Mason11c83492009-04-20 15:50:09 -04003688{
3689 wbc->nr_to_write -= nr_written;
Chris Mason11c83492009-04-20 15:50:09 -04003690}
3691
Chris Masond1310b22008-01-24 16:13:08 -05003692/*
Chris Mason40f76582014-05-21 13:35:51 -07003693 * helper for __extent_writepage, doing all of the delayed allocation setup.
3694 *
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003695 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
Chris Mason40f76582014-05-21 13:35:51 -07003696 * to write the page (copy into inline extent). In this case the IO has
3697 * been started and the page is already unlocked.
3698 *
3699 * This returns 0 if all went well (page still locked)
3700 * This returns < 0 if there were errors (page still locked)
Chris Masond1310b22008-01-24 16:13:08 -05003701 */
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03003702static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
Nikolay Borisov8cc02372018-11-08 10:18:07 +02003703 struct page *page, struct writeback_control *wbc,
3704 u64 delalloc_start, unsigned long *nr_written)
Chris Masond1310b22008-01-24 16:13:08 -05003705{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003706 u64 page_end = delalloc_start + PAGE_SIZE - 1;
Lu Fengqi3522e902018-11-29 11:33:38 +08003707 bool found;
Chris Mason40f76582014-05-21 13:35:51 -07003708 u64 delalloc_to_write = 0;
3709 u64 delalloc_end = 0;
3710 int ret;
3711 int page_started = 0;
3712
Chris Mason40f76582014-05-21 13:35:51 -07003713
3714 while (delalloc_end < page_end) {
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03003715 found = find_lock_delalloc_range(&inode->vfs_inode, page,
Chris Mason40f76582014-05-21 13:35:51 -07003716 &delalloc_start,
Nikolay Borisov917aace2018-10-26 14:43:20 +03003717 &delalloc_end);
Lu Fengqi3522e902018-11-29 11:33:38 +08003718 if (!found) {
Chris Mason40f76582014-05-21 13:35:51 -07003719 delalloc_start = delalloc_end + 1;
3720 continue;
3721 }
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03003722 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003723 delalloc_end, &page_started, nr_written, wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003724 if (ret) {
3725 SetPageError(page);
Nikolay Borisov5eaad972018-11-01 14:09:46 +02003726 /*
3727 * btrfs_run_delalloc_range should return < 0 on error, but
3728 * just in case it doesn't, we use > 0 here to mean the IO
3729 * has been started, so don't return > 0 unless things are
3730 * really going well.
Chris Mason40f76582014-05-21 13:35:51 -07003731 */
Nikolay Borisovb69d1ee2020-07-16 18:17:19 +03003732 return ret < 0 ? ret : -EIO;
Chris Mason40f76582014-05-21 13:35:51 -07003733 }
3734 /*
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003735 * delalloc_end is already one less than the total length, so
3736 * we don't subtract one from PAGE_SIZE
Chris Mason40f76582014-05-21 13:35:51 -07003737 */
3738 delalloc_to_write += (delalloc_end - delalloc_start +
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03003739 PAGE_SIZE) >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003740 delalloc_start = delalloc_end + 1;
3741 }
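	/*
	 * If the caller asked for fewer pages than the delalloc work we just
	 * found, bump nr_to_write to cover it: use delalloc_to_write directly
	 * while it is below twice the 8192-page threshold, otherwise cap at
	 * 8192 pages.
	 */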
3742 if (wbc->nr_to_write < delalloc_to_write) {
3743 int thresh = 8192;
3744
3745 if (delalloc_to_write < thresh * 2)
3746 thresh = delalloc_to_write;
3747 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3748 thresh);
3749 }
3750
3751 /* did the delalloc filling (btrfs_run_delalloc_range) already
3752 * unlock the page and start the IO?
3753 */
3754 if (page_started) {
3755 /*
3756 * we've unlocked the page, so we can't update
3757 * the mapping's writeback index, just update
3758 * nr_to_write.
3759 */
3760 wbc->nr_to_write -= *nr_written;
3761 return 1;
3762 }
3763
Nikolay Borisovb69d1ee2020-07-16 18:17:19 +03003764 return 0;
Chris Mason40f76582014-05-21 13:35:51 -07003765}
3766
3767/*
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003768 * Find the next range of dirty bytes we need to write.
3769 *
3770 * For subpage, one page can contain several sectors, and
3771 * __extent_writepage_io() will just grab all extent maps in the page
3772 * range and try to submit all non-inline/non-compressed extents.
3773 *
3774 * This is a big problem for subpage, as we shouldn't re-submit already
3775 * written data at all.
3776 * This function will look up the subpage dirty bitmap to find which range
3777 * we really need to submit.
3778 *
3779 * Return the next dirty range in [@start, @end).
3780 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
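 *
 * Rough example (assuming a 4K sectorsize on a 64K page, i.e. 16 sectors per
 * page): if @start points at sector 2 but only sectors 5-8 are dirty, the
 * returned range is [page_offset + 5 * 4K, page_offset + 9 * 4K).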
3781 */
3782static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
3783 struct page *page, u64 *start, u64 *end)
3784{
3785 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3786 u64 orig_start = *start;
3787 /* Declare as unsigned long so we can use bitmap ops */
3788 unsigned long dirty_bitmap;
3789 unsigned long flags;
3790 int nbits = (orig_start - page_offset(page)) >> fs_info->sectorsize_bits;
3791 int range_start_bit = nbits;
3792 int range_end_bit;
3793
3794 /*
3795 * For regular sector size == page size case, since one page only
3796 * contains one sector, we return the page offset directly.
3797 */
3798 if (fs_info->sectorsize == PAGE_SIZE) {
3799 *start = page_offset(page);
3800 *end = page_offset(page) + PAGE_SIZE;
3801 return;
3802 }
3803
3804 /* We should have the page locked, but just in case */
3805 spin_lock_irqsave(&subpage->lock, flags);
3806 dirty_bitmap = subpage->dirty_bitmap;
3807 spin_unlock_irqrestore(&subpage->lock, flags);
3808
3809 bitmap_next_set_region(&dirty_bitmap, &range_start_bit, &range_end_bit,
3810 BTRFS_SUBPAGE_BITMAP_SIZE);
3811 *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
3812 *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
3813}
3814
3815/*
Chris Mason40f76582014-05-21 13:35:51 -07003816 * helper for __extent_writepage. This calls the writepage start hooks,
3817 * and does the loop to map the page into extents and bios.
3818 *
3819 * We return 1 if the IO is started and the page is unlocked,
3820 * 0 if all went well (page still locked)
3821 * < 0 if there were errors (page still locked)
3822 */
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003823static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
Chris Mason40f76582014-05-21 13:35:51 -07003824 struct page *page,
3825 struct writeback_control *wbc,
3826 struct extent_page_data *epd,
3827 loff_t i_size,
3828 unsigned long nr_written,
David Sterba57e5ffe2019-10-29 18:28:55 +01003829 int *nr_ret)
Chris Mason40f76582014-05-21 13:35:51 -07003830{
Qu Wenruo6bc56362021-01-06 09:01:41 +08003831 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003832 u64 start = page_offset(page);
Qu Wenruo6bc56362021-01-06 09:01:41 +08003833 u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05003834 u64 cur = start;
3835 u64 extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003836 u64 block_start;
Chris Masond1310b22008-01-24 16:13:08 -05003837 struct extent_map *em;
Chris Mason40f76582014-05-21 13:35:51 -07003838 int ret = 0;
3839 int nr = 0;
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09003840 u32 opf = REQ_OP_WRITE;
David Sterba57e5ffe2019-10-29 18:28:55 +01003841 const unsigned int write_flags = wbc_to_write_flags(wbc);
Chris Mason40f76582014-05-21 13:35:51 -07003842 bool compressed;
Chris Masond1310b22008-01-24 16:13:08 -05003843
Qu Wenruo6bc56362021-01-06 09:01:41 +08003844 ret = btrfs_writepage_cow_fixup(page, start, end);
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003845 if (ret) {
3846 /* Fixup worker will requeue */
Josef Bacik5ab58052020-01-21 11:51:43 -05003847 redirty_page_for_writepage(wbc, page);
Nikolay Borisovd75855b2018-11-01 14:09:47 +02003848 update_nr_written(wbc, nr_written);
3849 unlock_page(page);
3850 return 1;
Chris Mason247e7432008-07-17 12:53:51 -04003851 }
3852
Chris Mason11c83492009-04-20 15:50:09 -04003853 /*
3854 * we don't want to touch the inode after unlocking the page,
3855 * so we update the mapping writeback index now
3856 */
David Sterba3d4b9492017-02-10 19:33:41 +01003857 update_nr_written(wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05003858
Chris Masond1310b22008-01-24 16:13:08 -05003859 while (cur <= end) {
Qu Wenruo0c64c332021-01-06 09:01:40 +08003860 u64 disk_bytenr;
Chris Mason40f76582014-05-21 13:35:51 -07003861 u64 em_end;
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003862 u64 dirty_range_start = cur;
3863 u64 dirty_range_end;
Qu Wenruo6bc56362021-01-06 09:01:41 +08003864 u32 iosize;
David Sterba58409ed2016-05-04 11:46:10 +02003865
Chris Mason40f76582014-05-21 13:35:51 -07003866 if (cur >= i_size) {
Qu Wenruo38a39ac72021-04-08 20:32:27 +08003867 btrfs_writepage_endio_finish_ordered(inode, page, cur,
3868 end, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003869 break;
3870 }
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003871
3872 find_next_dirty_byte(fs_info, page, &dirty_range_start,
3873 &dirty_range_end);
3874 if (cur < dirty_range_start) {
3875 cur = dirty_range_start;
3876 continue;
3877 }
3878
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003879 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
David Sterbac7040052011-04-19 18:00:01 +02003880 if (IS_ERR_OR_NULL(em)) {
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003881 btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
Filipe Manana61391d52014-05-09 17:17:40 +01003882 ret = PTR_ERR_OR_ZERO(em);
Chris Masond1310b22008-01-24 16:13:08 -05003883 break;
3884 }
3885
3886 extent_offset = cur - em->start;
Chris Mason40f76582014-05-21 13:35:51 -07003887 em_end = extent_map_end(em);
Qu Wenruo6bc56362021-01-06 09:01:41 +08003888 ASSERT(cur <= em_end);
3889 ASSERT(cur < end);
3890 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
3891 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
Chris Masond1310b22008-01-24 16:13:08 -05003892 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003893 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Qu Wenruo6bc56362021-01-06 09:01:41 +08003894 disk_bytenr = em->block_start + extent_offset;
3895
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003896 /*
3897 * Note that em_end from extent_map_end() and dirty_range_end from
3898 * find_next_dirty_byte() are both exclusive
3899 */
3900 iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09003901
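		/*
		 * Zoned filesystems write data with zone append, letting the
		 * device pick the location inside the zone; the final on-disk
		 * bytenr is presumably recorded when the bio completes.
		 */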
Johannes Thumshirne380adf2021-05-19 00:40:27 +09003902 if (btrfs_use_zone_append(inode, em->block_start))
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09003903 opf = REQ_OP_ZONE_APPEND;
3904
Chris Masond1310b22008-01-24 16:13:08 -05003905 free_extent_map(em);
3906 em = NULL;
3907
Chris Masonc8b97812008-10-29 14:49:59 -04003908 /*
3909 * compressed and inline extents are written through other
3910 * paths in the FS
3911 */
3912 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05003913 block_start == EXTENT_MAP_INLINE) {
Omar Sandovalc8b04032019-12-02 17:34:24 -08003914 if (compressed)
Chris Masonc8b97812008-10-29 14:49:59 -04003915 nr++;
Omar Sandovalc8b04032019-12-02 17:34:24 -08003916 else
Qu Wenruo38a39ac72021-04-08 20:32:27 +08003917 btrfs_writepage_endio_finish_ordered(inode,
3918 page, cur, cur + iosize - 1, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04003919 cur += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003920 continue;
3921 }
Chris Masonc8b97812008-10-29 14:49:59 -04003922
Qu Wenruod2a91062021-05-31 16:50:49 +08003923 btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
David Sterba58409ed2016-05-04 11:46:10 +02003924 if (!PageWriteback(page)) {
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03003925 btrfs_err(inode->root->fs_info,
David Sterba58409ed2016-05-04 11:46:10 +02003926 "page %lu not writeback, cur %llu end %llu",
3927 page->index, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003928 }
David Sterba58409ed2016-05-04 11:46:10 +02003929
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003930 /*
3931 * Although the PageDirty bit is cleared before entering this
3932 * function, the subpage dirty bit is not cleared.
3933 * So clear the subpage dirty bit here so that next time we won't submit
3934 * the page for a range that was already written to disk.
3935 */
3936 btrfs_page_clear_dirty(fs_info, page, cur, iosize);
3937
Qu Wenruo390ed292021-04-14 16:42:15 +08003938 ret = submit_extent_page(opf | write_flags, wbc,
3939 &epd->bio_ctrl, page,
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09003940 disk_bytenr, iosize,
Qu Wenruo390ed292021-04-14 16:42:15 +08003941 cur - page_offset(page),
David Sterba58409ed2016-05-04 11:46:10 +02003942 end_bio_extent_writepage,
Qu Wenruo390ed292021-04-14 16:42:15 +08003943 0, 0, false);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003944 if (ret) {
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003945 btrfs_page_set_error(fs_info, page, cur, iosize);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003946 if (PageWriteback(page))
Qu Wenruoc5ef5c62021-05-31 16:50:50 +08003947 btrfs_page_clear_writeback(fs_info, page, cur,
3948 iosize);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09003949 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003950
Qu Wenruo6bc56362021-01-06 09:01:41 +08003951 cur += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003952 nr++;
3953 }
Chris Mason40f76582014-05-21 13:35:51 -07003954 *nr_ret = nr;
Chris Mason40f76582014-05-21 13:35:51 -07003955 return ret;
3956}
3957
3958/*
3959 * the writepage semantics are similar to regular writepage. extent
3960 * records are inserted to lock ranges in the tree, and as dirty areas
3961 * are found, they are marked writeback. Then the lock bits are removed
3962 * and the end_io handler clears the writeback ranges
Qu Wenruo30659762019-03-20 14:27:42 +08003963 *
3964 * Return 0 if everything goes well.
3965 * Return <0 for error.
Chris Mason40f76582014-05-21 13:35:51 -07003966 */
3967static int __extent_writepage(struct page *page, struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01003968 struct extent_page_data *epd)
Chris Mason40f76582014-05-21 13:35:51 -07003969{
3970 struct inode *inode = page->mapping->host;
Chris Mason40f76582014-05-21 13:35:51 -07003971 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003972 u64 page_end = start + PAGE_SIZE - 1;
Chris Mason40f76582014-05-21 13:35:51 -07003973 int ret;
3974 int nr = 0;
Omar Sandovaleb70d222019-12-02 17:34:20 -08003975 size_t pg_offset;
Chris Mason40f76582014-05-21 13:35:51 -07003976 loff_t i_size = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003977 unsigned long end_index = i_size >> PAGE_SHIFT;
Chris Mason40f76582014-05-21 13:35:51 -07003978 unsigned long nr_written = 0;
3979
Chris Mason40f76582014-05-21 13:35:51 -07003980 trace___extent_writepage(page, inode, wbc);
3981
3982 WARN_ON(!PageLocked(page));
3983
3984 ClearPageError(page);
3985
Johannes Thumshirn70730172018-12-05 15:23:03 +01003986 pg_offset = offset_in_page(i_size);
Chris Mason40f76582014-05-21 13:35:51 -07003987 if (page->index > end_index ||
3988 (page->index == end_index && !pg_offset)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003989 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
Chris Mason40f76582014-05-21 13:35:51 -07003990 unlock_page(page);
3991 return 0;
3992 }
3993
3994 if (page->index == end_index) {
Ira Weinyd048b9c2021-05-04 18:40:07 -07003995 memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
Chris Mason40f76582014-05-21 13:35:51 -07003996 flush_dcache_page(page);
3997 }
3998
Qu Wenruo32443de2021-01-26 16:34:00 +08003999 ret = set_page_extent_mapped(page);
4000 if (ret < 0) {
4001 SetPageError(page);
4002 goto done;
4003 }
Chris Mason40f76582014-05-21 13:35:51 -07004004
Nikolay Borisov7789a552018-11-08 10:18:06 +02004005 if (!epd->extent_locked) {
Nikolay Borisovcd4c0bf942020-06-05 10:42:10 +03004006 ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
4007 &nr_written);
Nikolay Borisov7789a552018-11-08 10:18:06 +02004008 if (ret == 1)
Omar Sandoval169d2c82019-12-02 17:34:21 -08004009 return 0;
Nikolay Borisov7789a552018-11-08 10:18:06 +02004010 if (ret)
4011 goto done;
4012 }
Chris Mason40f76582014-05-21 13:35:51 -07004013
Nikolay Borisovd4580fe2020-06-03 08:55:33 +03004014 ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
4015 nr_written, &nr);
Chris Mason40f76582014-05-21 13:35:51 -07004016 if (ret == 1)
Omar Sandoval169d2c82019-12-02 17:34:21 -08004017 return 0;
Chris Mason40f76582014-05-21 13:35:51 -07004018
4019done:
Chris Masond1310b22008-01-24 16:13:08 -05004020 if (nr == 0) {
4021 /* make sure the mapping tag for page dirty gets cleared */
4022 set_page_writeback(page);
4023 end_page_writeback(page);
4024 }
Filipe Manana61391d52014-05-09 17:17:40 +01004025 if (PageError(page)) {
4026 ret = ret < 0 ? ret : -EIO;
4027 end_extent_writepage(page, ret, start, page_end);
4028 }
Chris Masond1310b22008-01-24 16:13:08 -05004029 unlock_page(page);
Qu Wenruo30659762019-03-20 14:27:42 +08004030 ASSERT(ret <= 0);
Chris Mason40f76582014-05-21 13:35:51 -07004031 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004032}
4033
Josef Bacikfd8b2b62013-04-24 16:41:19 -04004034void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004035{
NeilBrown74316202014-07-07 15:16:04 +10004036 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
4037 TASK_UNINTERRUPTIBLE);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004038}
4039
Filipe Manana18dfa712019-09-11 17:42:00 +01004040static void end_extent_buffer_writeback(struct extent_buffer *eb)
4041{
4042 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4043 smp_mb__after_atomic();
4044 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
4045}
4046
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004047/*
Qu Wenruoa3efb2f2020-10-21 14:24:49 +08004048 * Lock extent buffer status and pages for writeback.
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004049 *
Qu Wenruoa3efb2f2020-10-21 14:24:49 +08004050 * May try to flush write bio if we can't get the lock.
4051 *
4052 * Return 0 if the extent buffer doesn't need to be submitted.
4053 * (E.g. the extent buffer is not dirty)
4054 * Return >0 if the extent buffer is submitted to the bio.
4055 * Return <0 if something went wrong, no page is locked.
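 * On >0 return the eb has been marked EXTENT_BUFFER_WRITEBACK and, for the
 * regular sectorsize case, all of its pages are locked (subpage skips the
 * page locking entirely).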
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004056 */
David Sterba9df76fb2019-03-20 11:21:41 +01004057static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
Chris Mason0e378df2014-05-19 20:55:27 -07004058 struct extent_page_data *epd)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004059{
David Sterba9df76fb2019-03-20 11:21:41 +01004060 struct btrfs_fs_info *fs_info = eb->fs_info;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004061 int i, num_pages, failed_page_nr;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004062 int flush = 0;
4063 int ret = 0;
4064
4065 if (!btrfs_try_tree_write_lock(eb)) {
Qu Wenruof4340622019-03-20 14:27:41 +08004066 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004067 if (ret < 0)
4068 return ret;
4069 flush = 1;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004070 btrfs_tree_lock(eb);
4071 }
4072
4073 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
4074 btrfs_tree_unlock(eb);
4075 if (!epd->sync_io)
4076 return 0;
4077 if (!flush) {
Qu Wenruof4340622019-03-20 14:27:41 +08004078 ret = flush_write_bio(epd);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004079 if (ret < 0)
4080 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004081 flush = 1;
4082 }
Chris Masona098d8e82012-03-21 12:09:56 -04004083 while (1) {
4084 wait_on_extent_buffer_writeback(eb);
4085 btrfs_tree_lock(eb);
4086 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
4087 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004088 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004089 }
4090 }
4091
Josef Bacik51561ff2012-07-20 16:25:24 -04004092 /*
4093 * We need to do this to prevent races with anyone who checks if the eb is
4094 * under IO since we can end up having no IO bits set for a short period
4095 * of time.
4096 */
4097 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004098 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4099 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04004100 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004101 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
Nikolay Borisov104b4e52017-06-20 21:01:20 +03004102 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4103 -eb->len,
4104 fs_info->dirty_metadata_batch);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004105 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04004106 } else {
4107 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004108 }
4109
4110 btrfs_tree_unlock(eb);
4111
Qu Wenruof3156df2021-04-06 08:36:02 +08004112 /*
4113 * Either we don't need to submit any tree block, or we're submitting
4114 * subpage eb.
4115 * Subpage metadata doesn't use page locking at all, so we can skip
4116 * the page locking.
4117 */
4118 if (!ret || fs_info->sectorsize < PAGE_SIZE)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004119 return ret;
4120
David Sterba65ad0102018-06-29 10:56:49 +02004121 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004122 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02004123 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004124
4125 if (!trylock_page(p)) {
4126 if (!flush) {
Filipe Manana18dfa712019-09-11 17:42:00 +01004127 int err;
4128
4129 err = flush_write_bio(epd);
4130 if (err < 0) {
4131 ret = err;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004132 failed_page_nr = i;
4133 goto err_unlock;
4134 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004135 flush = 1;
4136 }
4137 lock_page(p);
4138 }
4139 }
4140
4141 return ret;
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004142err_unlock:
4143 /* Unlock already locked pages */
4144 for (i = 0; i < failed_page_nr; i++)
4145 unlock_page(eb->pages[i]);
Filipe Manana18dfa712019-09-11 17:42:00 +01004146 /*
4147 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
4148 * Also set back EXTENT_BUFFER_DIRTY so future write attempts on this eb
4149 * can be made, and undo everything done before.
4150 */
4151 btrfs_tree_lock(eb);
4152 spin_lock(&eb->refs_lock);
4153 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4154 end_extent_buffer_writeback(eb);
4155 spin_unlock(&eb->refs_lock);
4156 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
4157 fs_info->dirty_metadata_batch);
4158 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4159 btrfs_tree_unlock(eb);
Qu Wenruo2e3c2512019-03-20 14:27:46 +08004160 return ret;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004161}
4162
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004163static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
Filipe Manana656f30d2014-09-26 12:25:56 +01004164{
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004165 struct btrfs_fs_info *fs_info = eb->fs_info;
Filipe Manana656f30d2014-09-26 12:25:56 +01004166
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004167 btrfs_page_set_error(fs_info, page, eb->start, eb->len);
Filipe Manana656f30d2014-09-26 12:25:56 +01004168 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4169 return;
4170
4171 /*
Dennis Zhoueb5b64f2019-09-13 14:54:07 +01004172 * If we error out, we should add back the dirty_metadata_bytes
4173 * to make it consistent.
4174 */
Dennis Zhoueb5b64f2019-09-13 14:54:07 +01004175 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4176 eb->len, fs_info->dirty_metadata_batch);
4177
4178 /*
Filipe Manana656f30d2014-09-26 12:25:56 +01004179 * If writeback for a btree extent that doesn't belong to a log tree
4180 * failed, increment the counter transaction->eb_write_errors.
4181 * We do this because while the transaction is running and before it's
4182 * committing (when we call filemap_fdata[write|wait]_range against
4183 * the btree inode), we might have
4184 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
4185 * returns an error or an error happens during writeback, when we're
4186 * committing the transaction we wouldn't know about it, since the pages
4187 * can be no longer dirty nor marked anymore for writeback (if a
4188 * subsequent modification to the extent buffer didn't happen before the
4189 * transaction commit), which makes filemap_fdata[write|wait]_range not
4190 * able to find the pages tagged with SetPageError at transaction
4191 * commit time. So if this happens we must abort the transaction,
4192 * otherwise we commit a super block with btree roots that point to
4193 * btree nodes/leafs whose content on disk is invalid - either garbage
4194 * or the content of some node/leaf from a past generation that got
4195 * cowed or deleted and is no longer valid.
4196 *
4197 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
4198 * not be enough - we need to distinguish between log tree extents vs
4199 * non-log tree extents, and the next filemap_fdatawait_range() call
4200 * will catch and clear such errors in the mapping - and that call might
4201 * be from a log sync and not from a transaction commit. Also, checking
4202 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
4203 * not done and would not be reliable - the eb might have been released
4204 * from memory and reading it back again means that flag would not be
4205 * set (since it's a runtime flag, not persisted on disk).
4206 *
4207 * Using the flags below in the btree inode also makes us achieve the
4208 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
4209 * writeback for all dirty pages and before filemap_fdatawait_range()
4210 * is called, the writeback for all dirty pages had already finished
4211 * with errors - because we were not using AS_EIO/AS_ENOSPC,
4212 * filemap_fdatawait_range() would return success, as it could not know
4213 * that writeback errors happened (the pages were no longer tagged for
4214 * writeback).
4215 */
4216 switch (eb->log_index) {
4217 case -1:
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004218 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01004219 break;
4220 case 0:
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004221 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01004222 break;
4223 case 1:
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004224 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
Filipe Manana656f30d2014-09-26 12:25:56 +01004225 break;
4226 default:
4227 BUG(); /* unexpected, logic error */
4228 }
4229}
4230
Qu Wenruo2f3186d2021-04-06 08:36:00 +08004231/*
4232 * The endio specific version which won't touch any unsafe spinlock in endio
4233 * context.
4234 */
4235static struct extent_buffer *find_extent_buffer_nolock(
4236 struct btrfs_fs_info *fs_info, u64 start)
4237{
4238 struct extent_buffer *eb;
4239
4240 rcu_read_lock();
4241 eb = radix_tree_lookup(&fs_info->buffer_radix,
4242 start >> fs_info->sectorsize_bits);
4243 if (eb && atomic_inc_not_zero(&eb->refs)) {
4244 rcu_read_unlock();
4245 return eb;
4246 }
4247 rcu_read_unlock();
4248 return NULL;
4249}
4250
4251/*
4252 * The endio function for subpage extent buffer write.
4253 *
4254 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
4255 * after all extent buffers in the page have finished their writeback.
4256 */
Qu Wenruofa04c162021-04-27 12:53:35 +08004257static void end_bio_subpage_eb_writepage(struct bio *bio)
Qu Wenruo2f3186d2021-04-06 08:36:00 +08004258{
Qu Wenruofa04c162021-04-27 12:53:35 +08004259 struct btrfs_fs_info *fs_info;
Qu Wenruo2f3186d2021-04-06 08:36:00 +08004260 struct bio_vec *bvec;
4261 struct bvec_iter_all iter_all;
4262
Qu Wenruofa04c162021-04-27 12:53:35 +08004263 fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4264 ASSERT(fs_info->sectorsize < PAGE_SIZE);
4265
Qu Wenruo2f3186d2021-04-06 08:36:00 +08004266 ASSERT(!bio_flagged(bio, BIO_CLONED));
4267 bio_for_each_segment_all(bvec, bio, iter_all) {
4268 struct page *page = bvec->bv_page;
4269 u64 bvec_start = page_offset(page) + bvec->bv_offset;
4270 u64 bvec_end = bvec_start + bvec->bv_len - 1;
4271 u64 cur_bytenr = bvec_start;
4272
4273 ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
4274
4275 /* Iterate through all extent buffers in the range */
4276 while (cur_bytenr <= bvec_end) {
4277 struct extent_buffer *eb;
4278 int done;
4279
4280 /*
4281 * Here we can't use find_extent_buffer(), as it may
4282 * try to lock eb->refs_lock, which is not safe in endio
4283 * context.
4284 */
4285 eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
4286 ASSERT(eb);
4287
4288 cur_bytenr = eb->start + eb->len;
4289
4290 ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
4291 done = atomic_dec_and_test(&eb->io_pages);
4292 ASSERT(done);
4293
4294 if (bio->bi_status ||
4295 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4296 ClearPageUptodate(page);
4297 set_btree_ioerr(page, eb);
4298 }
4299
4300 btrfs_subpage_clear_writeback(fs_info, page, eb->start,
4301 eb->len);
4302 end_extent_buffer_writeback(eb);
4303 /*
4304 * free_extent_buffer() will grab spinlock which is not
4305 * safe in endio context. Thus here we manually dec
4306 * the ref.
4307 */
4308 atomic_dec(&eb->refs);
4309 }
4310 }
4311 bio_put(bio);
4312}
4313
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02004314static void end_bio_extent_buffer_writepage(struct bio *bio)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004315{
Kent Overstreet2c30c712013-11-07 12:20:26 -08004316 struct bio_vec *bvec;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004317 struct extent_buffer *eb;
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02004318 int done;
Ming Lei6dc4f102019-02-15 19:13:19 +08004319 struct bvec_iter_all iter_all;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004320
David Sterbac09abff2017-07-13 18:10:07 +02004321 ASSERT(!bio_flagged(bio, BIO_CLONED));
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02004322 bio_for_each_segment_all(bvec, bio, iter_all) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004323 struct page *page = bvec->bv_page;
4324
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004325 eb = (struct extent_buffer *)page->private;
4326 BUG_ON(!eb);
4327 done = atomic_dec_and_test(&eb->io_pages);
4328
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02004329 if (bio->bi_status ||
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02004330 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004331 ClearPageUptodate(page);
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004332 set_btree_ioerr(page, eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004333 }
4334
4335 end_page_writeback(page);
4336
4337 if (!done)
4338 continue;
4339
4340 end_extent_buffer_writeback(eb);
Kent Overstreet2c30c712013-11-07 12:20:26 -08004341 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004342
4343 bio_put(bio);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004344}
4345
Qu Wenruofa04c162021-04-27 12:53:35 +08004346static void prepare_eb_write(struct extent_buffer *eb)
4347{
4348 u32 nritems;
4349 unsigned long start;
4350 unsigned long end;
4351
4352 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
4353 atomic_set(&eb->io_pages, num_extent_pages(eb));
4354
4355 /* Set btree blocks beyond nritems with 0 to avoid stale content */
4356 nritems = btrfs_header_nritems(eb);
4357 if (btrfs_header_level(eb) > 0) {
4358 end = btrfs_node_key_ptr_offset(nritems);
4359 memzero_extent_buffer(eb, end, eb->len - end);
4360 } else {
4361 /*
4362 * Leaf:
4363 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
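 *
 * Item headers grow forward from the leaf header while item data grows
 * backward from the end of the leaf, so the gap zeroed here is the unused
 * space between the last item header and the start of the item data.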
4364 */
4365 start = btrfs_item_nr_offset(nritems);
4366 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
4367 memzero_extent_buffer(eb, start, end - start);
4368 }
4369}
4370
Qu Wenruo35b6ddf2021-04-06 08:36:01 +08004371/*
4372 * Unlike the work in write_one_eb(), we rely completely on extent locking.
4373 * Page locking is only used at a minimum to keep the VM code happy.
Qu Wenruo35b6ddf2021-04-06 08:36:01 +08004374 */
4375static int write_one_subpage_eb(struct extent_buffer *eb,
4376 struct writeback_control *wbc,
4377 struct extent_page_data *epd)
4378{
4379 struct btrfs_fs_info *fs_info = eb->fs_info;
4380 struct page *page = eb->pages[0];
4381 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4382 bool no_dirty_ebs = false;
4383 int ret;
4384
Qu Wenruofa04c162021-04-27 12:53:35 +08004385 prepare_eb_write(eb);
4386
Qu Wenruo35b6ddf2021-04-06 08:36:01 +08004387 /* clear_page_dirty_for_io() in subpage helper needs page locked */
4388 lock_page(page);
4389 btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
4390
4391 /* Check if this is the last dirty bit to update nr_written */
4392 no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
4393 eb->start, eb->len);
4394 if (no_dirty_ebs)
4395 clear_page_dirty_for_io(page);
4396
Qu Wenruo390ed292021-04-14 16:42:15 +08004397 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4398 &epd->bio_ctrl, page, eb->start, eb->len,
4399 eb->start - page_offset(page),
Qu Wenruofa04c162021-04-27 12:53:35 +08004400 end_bio_subpage_eb_writepage, 0, 0, false);
Qu Wenruo35b6ddf2021-04-06 08:36:01 +08004401 if (ret) {
4402 btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
4403 set_btree_ioerr(page, eb);
4404 unlock_page(page);
4405
4406 if (atomic_dec_and_test(&eb->io_pages))
4407 end_extent_buffer_writeback(eb);
4408 return -EIO;
4409 }
4410 unlock_page(page);
4411 /*
4412 * Submission finished without problem. If no range of the page is
4413 * dirty anymore, we have submitted a whole page, so update nr_written in wbc.
4414 */
4415 if (no_dirty_ebs)
4416 update_nr_written(wbc, 1);
4417 return ret;
4418}
4419
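/*
 * Write out one regular (sectorsize == PAGE_SIZE) extent buffer: every page
 * is marked writeback and submitted with REQ_META; if a submission fails,
 * the eb is flagged via set_btree_ioerr() and the remaining pages are
 * cleaned up.
 */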
Chris Mason0e378df2014-05-19 20:55:27 -07004420static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004421 struct writeback_control *wbc,
4422 struct extent_page_data *epd)
4423{
Qu Wenruo0c64c332021-01-06 09:01:40 +08004424 u64 disk_bytenr = eb->start;
David Sterbacc5e31a2018-03-01 18:20:27 +01004425 int i, num_pages;
Liu Boff40adf2017-08-24 18:19:48 -06004426 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04004427 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004428
Qu Wenruofa04c162021-04-27 12:53:35 +08004429 prepare_eb_write(eb);
4430
David Sterba65ad0102018-06-29 10:56:49 +02004431 num_pages = num_extent_pages(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004432 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02004433 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004434
4435 clear_page_dirty_for_io(p);
4436 set_page_writeback(p);
David Sterba0ceb34b2020-02-05 19:09:28 +01004437 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
Qu Wenruo390ed292021-04-14 16:42:15 +08004438 &epd->bio_ctrl, p, disk_bytenr,
4439 PAGE_SIZE, 0,
Mike Christie1f7ad752016-06-05 14:31:51 -05004440 end_bio_extent_buffer_writepage,
Qu Wenruo390ed292021-04-14 16:42:15 +08004441 0, 0, false);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004442 if (ret) {
Qu Wenruo5a2c6072021-03-25 15:14:44 +08004443 set_btree_ioerr(p, eb);
Takafumi Kubotafe01aa62017-02-09 17:24:33 +09004444 if (PageWriteback(p))
4445 end_page_writeback(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004446 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
4447 end_extent_buffer_writeback(eb);
4448 ret = -EIO;
4449 break;
4450 }
Qu Wenruo0c64c332021-01-06 09:01:40 +08004451 disk_bytenr += PAGE_SIZE;
David Sterba3d4b9492017-02-10 19:33:41 +01004452 update_nr_written(wbc, 1);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004453 unlock_page(p);
4454 }
4455
4456 if (unlikely(ret)) {
4457 for (; i < num_pages; i++) {
Chris Masonbbf65cf2014-10-04 09:56:45 -07004458 struct page *p = eb->pages[i];
Liu Bo81465022014-09-23 22:22:33 +08004459 clear_page_dirty_for_io(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004460 unlock_page(p);
4461 }
4462 }
4463
4464 return ret;
4465}
4466
Qu Wenruof91e0d02020-12-02 14:48:00 +08004467/*
Qu Wenruoc4aec292021-04-06 08:36:03 +08004468 * Submit one subpage btree page.
4469 *
4470 * The main difference to submit_eb_page() is:
4471 * - Page locking
4472 * For subpage, we don't rely on page locking at all.
4473 *
4474 * - Flush write bio
4475 * We only flush bio if we may be unable to fit current extent buffers into
4476 * current bio.
4477 *
4478 * Return >=0 for the number of submitted extent buffers.
4479 * Return <0 for fatal error.
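 *
 * Rough sketch of the iteration (assuming a 4K sectorsize, 16K nodesize and
 * 64K pages): sectors_per_node is 4, so once a dirty bit is found at
 * bit_start, the eb starting at page_start + bit_start * sectorsize is
 * submitted and the scan skips ahead by 4 bits.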
4480 */
4481static int submit_eb_subpage(struct page *page,
4482 struct writeback_control *wbc,
4483 struct extent_page_data *epd)
4484{
4485 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4486 int submitted = 0;
4487 u64 page_start = page_offset(page);
4488 int bit_start = 0;
4489 const int nbits = BTRFS_SUBPAGE_BITMAP_SIZE;
4490 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
4491 int ret;
4492
4493 /* Lock and write each dirty extent buffers in the range */
4494 while (bit_start < nbits) {
4495 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
4496 struct extent_buffer *eb;
4497 unsigned long flags;
4498 u64 start;
4499
4500 /*
4501 * Take private lock to ensure the subpage won't be detached
4502 * in the meantime.
4503 */
4504 spin_lock(&page->mapping->private_lock);
4505 if (!PagePrivate(page)) {
4506 spin_unlock(&page->mapping->private_lock);
4507 break;
4508 }
4509 spin_lock_irqsave(&subpage->lock, flags);
4510 if (!((1 << bit_start) & subpage->dirty_bitmap)) {
4511 spin_unlock_irqrestore(&subpage->lock, flags);
4512 spin_unlock(&page->mapping->private_lock);
4513 bit_start++;
4514 continue;
4515 }
4516
4517 start = page_start + bit_start * fs_info->sectorsize;
4518 bit_start += sectors_per_node;
4519
4520 /*
4521 * Here we just want to grab the eb without touching extra
4522 * spin locks, so call find_extent_buffer_nolock().
4523 */
4524 eb = find_extent_buffer_nolock(fs_info, start);
4525 spin_unlock_irqrestore(&subpage->lock, flags);
4526 spin_unlock(&page->mapping->private_lock);
4527
4528 /*
4529 * The eb has already reached 0 refs thus find_extent_buffer()
4530 * doesn't return it. We don't need to write back such eb
4531 * anyway.
4532 */
4533 if (!eb)
4534 continue;
4535
4536 ret = lock_extent_buffer_for_io(eb, epd);
4537 if (ret == 0) {
4538 free_extent_buffer(eb);
4539 continue;
4540 }
4541 if (ret < 0) {
4542 free_extent_buffer(eb);
4543 goto cleanup;
4544 }
Qu Wenruofa04c162021-04-27 12:53:35 +08004545 ret = write_one_subpage_eb(eb, wbc, epd);
Qu Wenruoc4aec292021-04-06 08:36:03 +08004546 free_extent_buffer(eb);
4547 if (ret < 0)
4548 goto cleanup;
4549 submitted++;
4550 }
4551 return submitted;
4552
4553cleanup:
4554 /* We hit error, end bio for the submitted extent buffers */
4555 end_write_bio(epd, ret);
4556 return ret;
4557}
4558
4559/*
Qu Wenruof91e0d02020-12-02 14:48:00 +08004560 * Submit all page(s) of one extent buffer.
4561 *
4562 * @page: the page of one extent buffer
4563 * @eb_context: to determine if we need to submit this page; if the current
4564 * page belongs to this eb, we don't need to submit it
4565 *
4566 * The caller should pass each page in their bytenr order, and here we use
4567 * @eb_context to determine if we have submitted pages of one extent buffer.
4568 *
4569 * If we have, we just skip until we hit a new page that doesn't belong to
4570 * current @eb_context.
4571 *
4572 * If not, we submit all the page(s) of the extent buffer.
4573 *
4574 * Return >0 if we have submitted the extent buffer successfully.
4575 * Return 0 if we don't need to submit the page, as it's already submitted by
4576 * previous call.
4577 * Return <0 for fatal error.
4578 */
4579static int submit_eb_page(struct page *page, struct writeback_control *wbc,
4580 struct extent_page_data *epd,
4581 struct extent_buffer **eb_context)
4582{
4583 struct address_space *mapping = page->mapping;
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004584 struct btrfs_block_group *cache = NULL;
Qu Wenruof91e0d02020-12-02 14:48:00 +08004585 struct extent_buffer *eb;
4586 int ret;
4587
4588 if (!PagePrivate(page))
4589 return 0;
4590
Qu Wenruoc4aec292021-04-06 08:36:03 +08004591 if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
4592 return submit_eb_subpage(page, wbc, epd);
4593
Qu Wenruof91e0d02020-12-02 14:48:00 +08004594 spin_lock(&mapping->private_lock);
4595 if (!PagePrivate(page)) {
4596 spin_unlock(&mapping->private_lock);
4597 return 0;
4598 }
4599
4600 eb = (struct extent_buffer *)page->private;
4601
4602 /*
4603 * Shouldn't happen and normally this would be a BUG_ON but no point
4604 * crashing the machine for something we can survive anyway.
4605 */
4606 if (WARN_ON(!eb)) {
4607 spin_unlock(&mapping->private_lock);
4608 return 0;
4609 }
4610
4611 if (eb == *eb_context) {
4612 spin_unlock(&mapping->private_lock);
4613 return 0;
4614 }
4615 ret = atomic_inc_not_zero(&eb->refs);
4616 spin_unlock(&mapping->private_lock);
4617 if (!ret)
4618 return 0;
4619
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004620 if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
4621 /*
4622 * If for_sync, this hole will be filled by a
4623 * transaction commit.
4624 */
4625 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4626 ret = -EAGAIN;
4627 else
4628 ret = 0;
4629 free_extent_buffer(eb);
4630 return ret;
4631 }
4632
Qu Wenruof91e0d02020-12-02 14:48:00 +08004633 *eb_context = eb;
4634
4635 ret = lock_extent_buffer_for_io(eb, epd);
4636 if (ret <= 0) {
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004637 btrfs_revert_meta_write_pointer(cache, eb);
4638 if (cache)
4639 btrfs_put_block_group(cache);
Qu Wenruof91e0d02020-12-02 14:48:00 +08004640 free_extent_buffer(eb);
4641 return ret;
4642 }
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004643 if (cache)
4644 btrfs_put_block_group(cache);
Qu Wenruof91e0d02020-12-02 14:48:00 +08004645 ret = write_one_eb(eb, wbc, epd);
4646 free_extent_buffer(eb);
4647 if (ret < 0)
4648 return ret;
4649 return 1;
4650}
4651
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004652int btree_write_cache_pages(struct address_space *mapping,
4653 struct writeback_control *wbc)
4654{
Qu Wenruof91e0d02020-12-02 14:48:00 +08004655 struct extent_buffer *eb_context = NULL;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004656 struct extent_page_data epd = {
Qu Wenruo390ed292021-04-14 16:42:15 +08004657 .bio_ctrl = { 0 },
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004658 .extent_locked = 0,
4659 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4660 };
Qu Wenruob3ff8f12020-02-12 14:12:44 +08004661 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004662 int ret = 0;
4663 int done = 0;
4664 int nr_to_write_done = 0;
4665 struct pagevec pvec;
4666 int nr_pages;
4667 pgoff_t index;
4668 pgoff_t end; /* Inclusive */
4669 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05004670 xa_mark_t tag;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004671
Mel Gorman86679822017-11-15 17:37:52 -08004672 pagevec_init(&pvec);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004673 if (wbc->range_cyclic) {
4674 index = mapping->writeback_index; /* Start from prev offset */
4675 end = -1;
Josef Bacik556755a2020-01-03 10:38:44 -05004676 /*
4677 * Start from the beginning does not need to cycle over the
4678 * range, mark it as scanned.
4679 */
4680 scanned = (index == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004681 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004682 index = wbc->range_start >> PAGE_SHIFT;
4683 end = wbc->range_end >> PAGE_SHIFT;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004684 scanned = 1;
4685 }
4686 if (wbc->sync_mode == WB_SYNC_ALL)
4687 tag = PAGECACHE_TAG_TOWRITE;
4688 else
4689 tag = PAGECACHE_TAG_DIRTY;
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004690 btrfs_zoned_meta_io_lock(fs_info);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004691retry:
4692 if (wbc->sync_mode == WB_SYNC_ALL)
4693 tag_pages_for_writeback(mapping, index, end);
4694 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara4006f432017-11-15 17:34:37 -08004695 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
Jan Kara67fd7072017-11-15 17:35:19 -08004696 tag))) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004697 unsigned i;
4698
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004699 for (i = 0; i < nr_pages; i++) {
4700 struct page *page = pvec.pages[i];
4701
Qu Wenruof91e0d02020-12-02 14:48:00 +08004702 ret = submit_eb_page(page, wbc, &epd, &eb_context);
4703 if (ret == 0)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004704 continue;
Qu Wenruof91e0d02020-12-02 14:48:00 +08004705 if (ret < 0) {
Filipe Manana0607eb1d2019-09-11 17:42:28 +01004706 done = 1;
Filipe Manana0607eb1d2019-09-11 17:42:28 +01004707 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004708 }
4709
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004710 /*
4711 * the filesystem may choose to bump up nr_to_write.
4712 * We have to make sure to honor the new nr_to_write
4713 * at any time
4714 */
4715 nr_to_write_done = wbc->nr_to_write <= 0;
4716 }
4717 pagevec_release(&pvec);
4718 cond_resched();
4719 }
4720 if (!scanned && !done) {
4721 /*
4722 * We hit the last page and there is more work to be done: wrap
4723 * back to the start of the file
4724 */
4725 scanned = 1;
4726 index = 0;
4727 goto retry;
4728 }
Qu Wenruo2b952ee2019-03-20 14:27:43 +08004729 if (ret < 0) {
4730 end_write_bio(&epd, ret);
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004731 goto out;
Qu Wenruo2b952ee2019-03-20 14:27:43 +08004732 }
Qu Wenruob3ff8f12020-02-12 14:12:44 +08004733 /*
4734 * If something went wrong, don't allow any metadata write bio to be
4735 * submitted.
4736 *
4737 * This would prevent use-after-free if we had dirty pages not
4738 * cleaned up, which can still happen by fuzzed images.
4739 *
4740 * - Bad extent tree
4741 * Allowing existing tree block to be allocated for other trees.
4742 *
4743 * - Log tree operations
4744 * Existing tree blocks get allocated to the log tree, bump its
4745 * generation, then get cleaned in tree re-balance.
4746 * Such tree block will not be written back, since it's clean,
4747 * thus no WRITTEN flag set.
4748 * And after log writes back, this tree block is not traced by
4749 * any dirty extent_io_tree.
4750 *
4751 * - Offending tree block gets re-dirtied from its original owner
4752 * Since it has bumped generation, no WRITTEN flag, it can be
4753 * reused without COWing. This tree block will not be traced
4754 * by btrfs_transaction::dirty_pages.
4755 *
4756 * Now such dirty tree block will not be cleaned by any dirty
4757 * extent io tree. Thus we don't want to submit such wild eb
4758 * if the fs already has error.
4759 */
4760 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
4761 ret = flush_write_bio(&epd);
4762 } else {
Josef Bacikfbabd4a2020-07-21 10:38:37 -04004763 ret = -EROFS;
Qu Wenruob3ff8f12020-02-12 14:12:44 +08004764 end_write_bio(&epd, ret);
4765 }
Naohiro Aota0bc09ca2021-02-04 19:22:08 +09004766out:
4767 btrfs_zoned_meta_io_unlock(fs_info);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004768 return ret;
4769}
4770
Chris Masond1310b22008-01-24 16:13:08 -05004771/**
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02004772 * Walk the list of dirty pages of the given address space and write all of them.
4773 *
Chris Masond1310b22008-01-24 16:13:08 -05004774 * @mapping: address space structure to write
Nikolay Borisov3bed2da2021-01-22 11:58:03 +02004775 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
4776 * @epd: holds context for the write, namely the bio
Chris Masond1310b22008-01-24 16:13:08 -05004777 *
4778 * If a page is already under I/O, write_cache_pages() skips it, even
4779 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4780 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4781 * and msync() need to guarantee that all the data which was dirty at the time
4782 * the call was made get new I/O started against them. If wbc->sync_mode is
4783 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4784 * existing IO to complete.
4785 */
David Sterba4242b642017-02-10 19:38:24 +01004786static int extent_write_cache_pages(struct address_space *mapping,
Chris Mason4bef0842008-09-08 11:18:08 -04004787 struct writeback_control *wbc,
David Sterbaaab6e9e2017-11-30 18:00:02 +01004788 struct extent_page_data *epd)
Chris Masond1310b22008-01-24 16:13:08 -05004789{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004790 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05004791 int ret = 0;
4792 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004793 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004794 struct pagevec pvec;
4795 int nr_pages;
4796 pgoff_t index;
4797 pgoff_t end; /* Inclusive */
Liu Boa91326672016-03-07 16:56:21 -08004798 pgoff_t done_index;
4799 int range_whole = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004800 int scanned = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05004801 xa_mark_t tag;
Chris Masond1310b22008-01-24 16:13:08 -05004802
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004803 /*
4804 * We have to hold onto the inode so that ordered extents can do their
4805 * work when the IO finishes. The alternative to this is failing to add
4806 * an ordered extent if the igrab() fails there and that is a huge pain
4807 * to deal with, so instead just hold onto the inode throughout the
4808 * writepages operation. If it fails here we are freeing up the inode
4809 * anyway and we'd rather not waste our time writing out stuff that is
4810 * going to be truncated anyway.
4811 */
4812 if (!igrab(inode))
4813 return 0;
4814
Mel Gorman86679822017-11-15 17:37:52 -08004815 pagevec_init(&pvec);
Chris Masond1310b22008-01-24 16:13:08 -05004816 if (wbc->range_cyclic) {
4817 index = mapping->writeback_index; /* Start from prev offset */
4818 end = -1;
Josef Bacik556755a2020-01-03 10:38:44 -05004819 /*
4820 * Start from the beginning does not need to cycle over the
4821 * range, mark it as scanned.
4822 */
4823 scanned = (index == 0);
Chris Masond1310b22008-01-24 16:13:08 -05004824 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004825 index = wbc->range_start >> PAGE_SHIFT;
4826 end = wbc->range_end >> PAGE_SHIFT;
Liu Boa91326672016-03-07 16:56:21 -08004827 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4828 range_whole = 1;
Chris Masond1310b22008-01-24 16:13:08 -05004829 scanned = 1;
4830 }
Ethan Lien3cd24c62018-11-01 14:49:03 +08004831
4832 /*
4833 * We do the tagged writepage as long as the snapshot flush bit is set
4834 * and we are the first one who do the filemap_flush() on this inode.
4835 *
4836 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4837 * not race in and drop the bit.
4838 */
4839 if (range_whole && wbc->nr_to_write == LONG_MAX &&
4840 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4841 &BTRFS_I(inode)->runtime_flags))
4842 wbc->tagged_writepages = 1;
4843
4844 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004845 tag = PAGECACHE_TAG_TOWRITE;
4846 else
4847 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05004848retry:
Ethan Lien3cd24c62018-11-01 14:49:03 +08004849 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
Josef Bacikf7aaa062011-07-15 21:26:38 +00004850 tag_pages_for_writeback(mapping, index, end);
Liu Boa91326672016-03-07 16:56:21 -08004851 done_index = index;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004852 while (!done && !nr_to_write_done && (index <= end) &&
Jan Kara67fd7072017-11-15 17:35:19 -08004853 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4854 &index, end, tag))) {
Chris Masond1310b22008-01-24 16:13:08 -05004855 unsigned i;
4856
Chris Masond1310b22008-01-24 16:13:08 -05004857 for (i = 0; i < nr_pages; i++) {
4858 struct page *page = pvec.pages[i];
4859
Tejun Heof7bddf12019-10-03 07:27:13 -07004860 done_index = page->index + 1;
Chris Masond1310b22008-01-24 16:13:08 -05004861 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07004862 * At this point we hold neither the i_pages lock nor
4863 * the page lock: the page may be truncated or
4864 * invalidated (changing page->mapping to NULL),
4865 * or even swizzled back from swapper_space to
4866 * tmpfs file mapping
Chris Masond1310b22008-01-24 16:13:08 -05004867 */
Josef Bacikc8f2f242013-02-11 11:33:00 -05004868 if (!trylock_page(page)) {
Qu Wenruof4340622019-03-20 14:27:41 +08004869 ret = flush_write_bio(epd);
4870 BUG_ON(ret < 0);
Josef Bacikc8f2f242013-02-11 11:33:00 -05004871 lock_page(page);
Chris Mason01d658f2011-11-01 10:08:06 -04004872 }
Chris Masond1310b22008-01-24 16:13:08 -05004873
4874 if (unlikely(page->mapping != mapping)) {
4875 unlock_page(page);
4876 continue;
4877 }
4878
Chris Masond2c3f4f2008-11-19 12:44:22 -05004879 if (wbc->sync_mode != WB_SYNC_NONE) {
Qu Wenruof4340622019-03-20 14:27:41 +08004880 if (PageWriteback(page)) {
4881 ret = flush_write_bio(epd);
4882 BUG_ON(ret < 0);
4883 }
Chris Masond1310b22008-01-24 16:13:08 -05004884 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05004885 }
Chris Masond1310b22008-01-24 16:13:08 -05004886
4887 if (PageWriteback(page) ||
4888 !clear_page_dirty_for_io(page)) {
4889 unlock_page(page);
4890 continue;
4891 }
4892
David Sterbaaab6e9e2017-11-30 18:00:02 +01004893 ret = __extent_writepage(page, wbc, epd);
Liu Boa91326672016-03-07 16:56:21 -08004894 if (ret < 0) {
Liu Boa91326672016-03-07 16:56:21 -08004895 done = 1;
4896 break;
4897 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004898
4899 /*
4900 * the filesystem may choose to bump up nr_to_write.
4901 * We have to make sure to honor the new nr_to_write
4902 * at any time
4903 */
4904 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05004905 }
4906 pagevec_release(&pvec);
4907 cond_resched();
4908 }
Liu Bo894b36e2016-03-07 16:56:22 -08004909 if (!scanned && !done) {
Chris Masond1310b22008-01-24 16:13:08 -05004910 /*
4911 * We hit the last page and there is more work to be done: wrap
4912 * back to the start of the file
4913 */
4914 scanned = 1;
4915 index = 0;
Josef Bacik42ffb0b2020-01-23 15:33:02 -05004916
4917 /*
4918 * If we're looping we could run into a page that is locked by a
4919 * writer and that writer could be waiting on writeback for a
4920 * page in our current bio, and thus deadlock, so flush the
4921 * write bio here.
4922 */
4923 ret = flush_write_bio(epd);
4924 if (!ret)
4925 goto retry;
Chris Masond1310b22008-01-24 16:13:08 -05004926 }
Liu Boa91326672016-03-07 16:56:21 -08004927
4928 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4929 mapping->writeback_index = done_index;
4930
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004931 btrfs_add_delayed_iput(inode);
Liu Bo894b36e2016-03-07 16:56:22 -08004932 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004933}
Chris Masond1310b22008-01-24 16:13:08 -05004934
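The range_cyclic handling above is easier to see in isolation: start the sweep at mapping->writeback_index, run to the end of the range, and wrap around to page 0 exactly once if the first pass did not start there. A minimal user-space sketch of just that control flow (the 'done'/error handling and the bio flushing are deliberately left out, and every name here is made up):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for writing out the dirty pages in [index, end]. */
static void sweep(unsigned long index, unsigned long end)
{
	printf("write pages %lu..%lu\n", index, end);
}

/*
 * Mirror of the retry logic: if the sweep did not start at page 0, pages
 * before 'writeback_index' were never visited, so wrap around exactly once.
 */
static void write_cyclic(unsigned long writeback_index, unsigned long last)
{
	unsigned long index = writeback_index;
	bool scanned = (index == 0);

retry:
	sweep(index, last);
	if (!scanned) {
		scanned = true;
		index = 0;
		goto retry;
	}
}

int main(void)
{
	write_cyclic(42, 100);	/* two passes: 42..100, then 0..100 */
	write_cyclic(0, 100);	/* one pass */
	return 0;
}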
Nikolay Borisov0a9b0e52017-12-08 15:55:59 +02004935int extent_write_full_page(struct page *page, struct writeback_control *wbc)
Chris Masond1310b22008-01-24 16:13:08 -05004936{
4937 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004938 struct extent_page_data epd = {
Qu Wenruo390ed292021-04-14 16:42:15 +08004939 .bio_ctrl = { 0 },
Chris Mason771ed682008-11-06 22:02:51 -05004940 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004941 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05004942 };
Chris Masond1310b22008-01-24 16:13:08 -05004943
Chris Masond1310b22008-01-24 16:13:08 -05004944 ret = __extent_writepage(page, wbc, &epd);
Qu Wenruo30659762019-03-20 14:27:42 +08004945 ASSERT(ret <= 0);
4946 if (ret < 0) {
4947 end_write_bio(&epd, ret);
4948 return ret;
4949 }
Chris Masond1310b22008-01-24 16:13:08 -05004950
Qu Wenruo30659762019-03-20 14:27:42 +08004951 ret = flush_write_bio(&epd);
4952 ASSERT(ret <= 0);
Chris Masond1310b22008-01-24 16:13:08 -05004953 return ret;
4954}
Chris Masond1310b22008-01-24 16:13:08 -05004955
Nikolay Borisov5e3ee232017-12-08 15:55:58 +02004956int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
Chris Mason771ed682008-11-06 22:02:51 -05004957 int mode)
4958{
4959 int ret = 0;
4960 struct address_space *mapping = inode->i_mapping;
4961 struct page *page;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004962 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4963 PAGE_SHIFT;
Chris Mason771ed682008-11-06 22:02:51 -05004964
4965 struct extent_page_data epd = {
Qu Wenruo390ed292021-04-14 16:42:15 +08004966 .bio_ctrl = { 0 },
Chris Mason771ed682008-11-06 22:02:51 -05004967 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04004968 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05004969 };
4970 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05004971 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05004972 .nr_to_write = nr_pages * 2,
4973 .range_start = start,
4974 .range_end = end + 1,
Chris Masonec39f762019-07-10 12:28:17 -07004975 /* We're called from an async helper function */
4976 .punt_to_cgroup = 1,
4977 .no_cgroup_owner = 1,
Chris Mason771ed682008-11-06 22:02:51 -05004978 };
4979
Chris Masondbb70be2019-07-10 12:28:18 -07004980 wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
Chris Masond3977122009-01-05 21:25:51 -05004981 while (start <= end) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004982 page = find_get_page(mapping, start >> PAGE_SHIFT);
Chris Mason771ed682008-11-06 22:02:51 -05004983 if (clear_page_dirty_for_io(page))
4984 ret = __extent_writepage(page, &wbc_writepages, &epd);
4985 else {
Qu Wenruo38a39ac72021-04-08 20:32:27 +08004986 btrfs_writepage_endio_finish_ordered(BTRFS_I(inode),
4987 page, start, start + PAGE_SIZE - 1, 1);
Chris Mason771ed682008-11-06 22:02:51 -05004988 unlock_page(page);
4989 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004990 put_page(page);
4991 start += PAGE_SIZE;
Chris Mason771ed682008-11-06 22:02:51 -05004992 }
4993
Qu Wenruo02c6db42019-03-20 14:27:45 +08004994 ASSERT(ret <= 0);
Chris Masondbb70be2019-07-10 12:28:18 -07004995 if (ret == 0)
4996 ret = flush_write_bio(&epd);
4997 else
Qu Wenruo02c6db42019-03-20 14:27:45 +08004998 end_write_bio(&epd, ret);
Chris Masondbb70be2019-07-10 12:28:18 -07004999
5000 wbc_detach_inode(&wbc_writepages);
Chris Mason771ed682008-11-06 22:02:51 -05005001 return ret;
5002}
Chris Masond1310b22008-01-24 16:13:08 -05005003
Nikolay Borisov8ae225a2018-04-19 10:46:38 +03005004int extent_writepages(struct address_space *mapping,
Chris Masond1310b22008-01-24 16:13:08 -05005005 struct writeback_control *wbc)
5006{
5007 int ret = 0;
5008 struct extent_page_data epd = {
Qu Wenruo390ed292021-04-14 16:42:15 +08005009 .bio_ctrl = { 0 },
Chris Mason771ed682008-11-06 22:02:51 -05005010 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04005011 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05005012 };
5013
David Sterba935db852017-06-23 04:30:28 +02005014 ret = extent_write_cache_pages(mapping, wbc, &epd);
Qu Wenruoa2a72fb2019-03-20 14:27:48 +08005015 ASSERT(ret <= 0);
5016 if (ret < 0) {
5017 end_write_bio(&epd, ret);
5018 return ret;
5019 }
5020 ret = flush_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05005021 return ret;
5022}
Chris Masond1310b22008-01-24 16:13:08 -05005023
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07005024void extent_readahead(struct readahead_control *rac)
Chris Masond1310b22008-01-24 16:13:08 -05005025{
Qu Wenruo390ed292021-04-14 16:42:15 +08005026 struct btrfs_bio_ctrl bio_ctrl = { 0 };
Liu Bo67c96842012-07-20 21:43:09 -06005027 struct page *pagepool[16];
Miao Xie125bac012013-07-25 19:22:37 +08005028 struct extent_map *em_cached = NULL;
Filipe Manana808f80b2015-09-28 09:56:26 +01005029 u64 prev_em_start = (u64)-1;
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07005030 int nr;
Chris Masond1310b22008-01-24 16:13:08 -05005031
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07005032 while ((nr = readahead_page_batch(rac, pagepool))) {
Matthew Wilcox (Oracle)32c0a6b2021-03-21 21:03:11 +00005033 u64 contig_start = readahead_pos(rac);
5034 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
Chris Masond1310b22008-01-24 16:13:08 -05005035
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07005036 contiguous_readpages(pagepool, nr, contig_start, contig_end,
Qu Wenruo390ed292021-04-14 16:42:15 +08005037 &em_cached, &bio_ctrl, &prev_em_start);
Chris Masond1310b22008-01-24 16:13:08 -05005038 }
Liu Bo67c96842012-07-20 21:43:09 -06005039
Miao Xie125bac012013-07-25 19:22:37 +08005040 if (em_cached)
5041 free_extent_map(em_cached);
5042
Qu Wenruo390ed292021-04-14 16:42:15 +08005043 if (bio_ctrl.bio) {
5044 if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
Matthew Wilcox (Oracle)ba206a02020-06-01 21:47:05 -07005045 return;
5046 }
Chris Masond1310b22008-01-24 16:13:08 -05005047}
Chris Masond1310b22008-01-24 16:13:08 -05005048
5049/*
5050 * basic invalidatepage code, this waits on any locked or writeback
5051 * ranges corresponding to the page, and then deletes any extent state
5052 * records from the tree
5053 */
5054int extent_invalidatepage(struct extent_io_tree *tree,
5055 struct page *page, unsigned long offset)
5056{
Josef Bacik2ac55d42010-02-03 19:33:23 +00005057 struct extent_state *cached_state = NULL;
Miao Xie4eee4fa2012-12-21 09:17:45 +00005058 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005059 u64 end = start + PAGE_SIZE - 1;
Chris Masond1310b22008-01-24 16:13:08 -05005060 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
5061
Qu Wenruo829ddec2020-11-13 20:51:39 +08005062 /* This function is only called for the btree inode */
5063 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
5064
Qu Wenruofda28322013-02-26 08:10:22 +00005065 start += ALIGN(offset, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05005066 if (start > end)
5067 return 0;
5068
David Sterbaff13db42015-12-03 14:30:40 +01005069 lock_extent_bits(tree, start, end, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04005070 wait_on_page_writeback(page);
Qu Wenruo829ddec2020-11-13 20:51:39 +08005071
5072 /*
5073 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
5074 * so here we only need to unlock the extent range to free any
5075 * existing extent state.
5076 */
5077 unlock_extent_cached(tree, start, end, &cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05005078 return 0;
5079}
Chris Masond1310b22008-01-24 16:13:08 -05005080
5081/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04005082 * a helper for releasepage, this tests for areas of the page that
5083 * are locked or under IO and drops the related state bits if it is safe
5084 * to drop the page.
5085 */
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03005086static int try_release_extent_state(struct extent_io_tree *tree,
Eric Sandeen48a3b632013-04-25 20:41:01 +00005087 struct page *page, gfp_t mask)
Chris Mason7b13b7b2008-04-18 10:29:50 -04005088{
Miao Xie4eee4fa2012-12-21 09:17:45 +00005089 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005090 u64 end = start + PAGE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04005091 int ret = 1;
5092
Nikolay Borisov88826792019-03-14 15:28:31 +02005093 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
Chris Mason7b13b7b2008-04-18 10:29:50 -04005094 ret = 0;
Nikolay Borisov88826792019-03-14 15:28:31 +02005095 } else {
Chris Mason11ef1602009-09-23 20:28:46 -04005096 /*
Filipe Manana2766ff62020-11-04 11:07:34 +00005097 * At this point we can safely clear everything except the
5098 * locked bit, the nodatasum bit and the delalloc new bit.
5099 * The delalloc new bit will be cleared by ordered extent
5100 * completion.
Chris Mason11ef1602009-09-23 20:28:46 -04005101 */
David Sterba66b0c882017-10-31 16:30:47 +01005102 ret = __clear_extent_bit(tree, start, end,
Filipe Manana2766ff62020-11-04 11:07:34 +00005103 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
5104 0, 0, NULL, mask, NULL);
Chris Masone3f24cc2011-02-14 12:52:08 -05005105
5106 /* if clear_extent_bit failed for enomem reasons,
5107 * we can't allow the release to continue.
5108 */
5109 if (ret < 0)
5110 ret = 0;
5111 else
5112 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04005113 }
5114 return ret;
5115}
Chris Mason7b13b7b2008-04-18 10:29:50 -04005116
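The mask handed to __clear_extent_bit() above is the complement of the bits that have to survive until ordered extent completion. A throwaway sketch of that bit arithmetic, with purely illustrative DEMO_* values standing in for the real EXTENT_* flags:

#include <assert.h>

/* Illustrative values only; the real bits live in extent-io-tree.h. */
enum {
	DEMO_LOCKED       = 1u << 0,
	DEMO_NODATASUM    = 1u << 1,
	DEMO_DELALLOC_NEW = 1u << 2,
	DEMO_DIRTY        = 1u << 3,
	DEMO_UPTODATE     = 1u << 4,
};

int main(void)
{
	unsigned int state = DEMO_LOCKED | DEMO_DIRTY | DEMO_UPTODATE;
	/* Bits requested to be cleared: everything except the three keepers. */
	unsigned int clear = ~(DEMO_LOCKED | DEMO_NODATASUM | DEMO_DELALLOC_NEW);

	state &= ~clear;		/* what clearing those bits leaves behind */
	assert(state == DEMO_LOCKED);	/* only the kept bits can remain */
	return 0;
}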
5117/*
Chris Masond1310b22008-01-24 16:13:08 -05005118 * a helper for releasepage. As long as there are no locked extents
5119 * in the range corresponding to the page, both state records and extent
5120 * map records are removed
5121 */
Nikolay Borisov477a30b2018-04-19 10:46:34 +03005122int try_release_extent_mapping(struct page *page, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05005123{
5124 struct extent_map *em;
Miao Xie4eee4fa2012-12-21 09:17:45 +00005125 u64 start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005126 u64 end = start + PAGE_SIZE - 1;
Filipe Mananabd3599a2018-07-12 01:36:43 +01005127 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
5128 struct extent_io_tree *tree = &btrfs_inode->io_tree;
5129 struct extent_map_tree *map = &btrfs_inode->extent_tree;
Chris Mason7b13b7b2008-04-18 10:29:50 -04005130
Mel Gormand0164ad2015-11-06 16:28:21 -08005131 if (gfpflags_allow_blocking(mask) &&
Byongho Leeee221842015-12-15 01:42:10 +09005132 page->mapping->host->i_size > SZ_16M) {
Yan39b56372008-02-15 10:40:50 -05005133 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05005134 while (start <= end) {
Filipe Mananafbc2bd72020-07-22 12:28:52 +01005135 struct btrfs_fs_info *fs_info;
5136 u64 cur_gen;
5137
Yan39b56372008-02-15 10:40:50 -05005138 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04005139 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05005140 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09005141 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04005142 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05005143 break;
5144 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04005145 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
5146 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04005147 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05005148 free_extent_map(em);
5149 break;
5150 }
Filipe Manana3d6448e2020-07-22 12:28:37 +01005151 if (test_range_bit(tree, em->start,
5152 extent_map_end(em) - 1,
5153 EXTENT_LOCKED, 0, NULL))
5154 goto next;
5155 /*
5156 * If it's not in the list of modified extents, used
5157 * by a fast fsync, we can remove it. If it's being
5158 * logged we can safely remove it since fsync took an
5159 * extra reference on the em.
5160 */
5161 if (list_empty(&em->list) ||
Filipe Mananafbc2bd72020-07-22 12:28:52 +01005162 test_bit(EXTENT_FLAG_LOGGING, &em->flags))
5163 goto remove_em;
5164 /*
5165 * If it's in the list of modified extents, remove it
5166 * only if its generation is older than the current one,
5167 * in which case we don't need it for a fast fsync.
5168 * Otherwise don't remove it, we could be racing with an
5169 * ongoing fast fsync that could miss the new extent.
5170 */
5171 fs_info = btrfs_inode->root->fs_info;
5172 spin_lock(&fs_info->trans_lock);
5173 cur_gen = fs_info->generation;
5174 spin_unlock(&fs_info->trans_lock);
5175 if (em->generation >= cur_gen)
5176 goto next;
5177remove_em:
Filipe Manana5e548b32020-07-22 12:29:01 +01005178 /*
5179 * We only remove extent maps that are not in the list of
5180 * modified extents or that are in the list but with a
5181 * generation lower than the current generation, so there
5182 * is no need to set the full fsync flag on the inode (it
5183 * hurts the fsync performance for workloads with a data
5184 * size that exceeds or is close to the system's memory).
5185 */
Filipe Mananafbc2bd72020-07-22 12:28:52 +01005186 remove_extent_mapping(map, em);
5187 /* once for the rb tree */
5188 free_extent_map(em);
Filipe Manana3d6448e2020-07-22 12:28:37 +01005189next:
Chris Mason70dec802008-01-29 09:59:12 -05005190 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04005191 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05005192
5193 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05005194 free_extent_map(em);
Paul E. McKenney9f47eb52020-05-08 14:15:37 -07005195
5196 cond_resched(); /* Allow large-extent preemption. */
Chris Masond1310b22008-01-24 16:13:08 -05005197 }
Chris Masond1310b22008-01-24 16:13:08 -05005198 }
Nikolay Borisov29c68b2d2018-04-19 10:46:35 +03005199 return try_release_extent_state(tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05005200}
Chris Masond1310b22008-01-24 16:13:08 -05005201
Chris Masonec29ed52011-02-23 16:23:20 -05005202/*
5203 * helper function for fiemap, which doesn't want to see any holes.
5204 * This maps until we find something past 'last'
5205 */
Nikolay Borisovf1bbde82020-08-31 14:42:45 +03005206static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
David Sterbae3350e12017-06-23 04:09:57 +02005207 u64 offset, u64 last)
Chris Masonec29ed52011-02-23 16:23:20 -05005208{
Nikolay Borisovf1bbde82020-08-31 14:42:45 +03005209 u64 sectorsize = btrfs_inode_sectorsize(inode);
Chris Masonec29ed52011-02-23 16:23:20 -05005210 struct extent_map *em;
5211 u64 len;
5212
5213 if (offset >= last)
5214 return NULL;
5215
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05305216 while (1) {
Chris Masonec29ed52011-02-23 16:23:20 -05005217 len = last - offset;
5218 if (len == 0)
5219 break;
Qu Wenruofda28322013-02-26 08:10:22 +00005220 len = ALIGN(len, sectorsize);
Nikolay Borisovf1bbde82020-08-31 14:42:45 +03005221 em = btrfs_get_extent_fiemap(inode, offset, len);
David Sterbac7040052011-04-19 18:00:01 +02005222 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05005223 return em;
5224
5225 /* if this isn't a hole return it */
Nikolay Borisov4a2d25c2017-11-23 10:51:43 +02005226 if (em->block_start != EXTENT_MAP_HOLE)
Chris Masonec29ed52011-02-23 16:23:20 -05005227 return em;
Chris Masonec29ed52011-02-23 16:23:20 -05005228
5229 /* this is a hole, advance to the next extent */
5230 offset = extent_map_end(em);
5231 free_extent_map(em);
5232 if (offset >= last)
5233 break;
5234 }
5235 return NULL;
5236}
5237
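Roughly, the loop above keeps asking for the next mapping and jumps over holes until it either finds a real extent or walks past 'last'. A stand-alone sketch of that walk over a flat array (struct demo_em and skip_holes() are invented for illustration; the kernel does per-call extent map lookups instead):

#include <stdio.h>

struct demo_em {
	unsigned long long start, len;
	int is_hole;
};

/*
 * Return the first non-hole mapping covering or after 'offset', or NULL if
 * only holes remain before 'last'.
 */
static const struct demo_em *skip_holes(const struct demo_em *ems, int n,
					unsigned long long offset,
					unsigned long long last)
{
	for (int i = 0; i < n && offset < last; i++) {
		if (ems[i].start + ems[i].len <= offset)
			continue;		/* entirely before our position */
		if (!ems[i].is_hole)
			return &ems[i];
		offset = ems[i].start + ems[i].len;	/* hole: jump past it */
	}
	return NULL;
}

int main(void)
{
	const struct demo_em ems[] = {
		{ 0, 4096, 1 }, { 4096, 4096, 1 }, { 8192, 4096, 0 },
	};
	const struct demo_em *em = skip_holes(ems, 3, 0, 16384);

	if (em)
		printf("first extent at %llu\n", em->start);	/* 8192 */
	return 0;
}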
Qu Wenruo47518322017-04-07 10:43:15 +08005238/*
5239 * To cache previous fiemap extent
5240 *
5241 * Will be used for merging fiemap extent
5242 */
5243struct fiemap_cache {
5244 u64 offset;
5245 u64 phys;
5246 u64 len;
5247 u32 flags;
5248 bool cached;
5249};
5250
5251/*
5252 * Helper to submit fiemap extent.
5253 *
5254 * Will try to merge current fiemap extent specified by @offset, @phys,
5255 * @len and @flags with cached one.
5256 * Only when we fail to merge is the cached one submitted as a
5257 * fiemap extent.
5258 *
5259 * Return value is the same as fiemap_fill_next_extent().
5260 */
5261static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
5262 struct fiemap_cache *cache,
5263 u64 offset, u64 phys, u64 len, u32 flags)
5264{
5265 int ret = 0;
5266
5267 if (!cache->cached)
5268 goto assign;
5269
5270 /*
5271 * Sanity check: extent_fiemap() should have ensured that the new
Andrea Gelmini52042d82018-11-28 12:05:13 +01005272 * fiemap extent won't overlap with the cached one.
Qu Wenruo47518322017-04-07 10:43:15 +08005273 * Not recoverable.
5274 *
5275 * NOTE: Physical address can overlap, due to compression
5276 */
5277 if (cache->offset + cache->len > offset) {
5278 WARN_ON(1);
5279 return -EINVAL;
5280 }
5281
5282 /*
5283 * Only merges fiemap extents if
5284 * 1) Their logical addresses are continuous
5285 *
5286 * 2) Their physical addresses are continuous
5287 * So truly compressed (physical size smaller than logical size)
5288 * extents won't get merged with each other
5289 *
5290 * 3) They share the same flags except FIEMAP_EXTENT_LAST
5291 * So a regular extent won't get merged with a prealloc extent
5292 */
5293 if (cache->offset + cache->len == offset &&
5294 cache->phys + cache->len == phys &&
5295 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
5296 (flags & ~FIEMAP_EXTENT_LAST)) {
5297 cache->len += len;
5298 cache->flags |= flags;
5299 goto try_submit_last;
5300 }
5301
5302 /* Not mergeable, need to submit cached one */
5303 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5304 cache->len, cache->flags);
5305 cache->cached = false;
5306 if (ret)
5307 return ret;
5308assign:
5309 cache->cached = true;
5310 cache->offset = offset;
5311 cache->phys = phys;
5312 cache->len = len;
5313 cache->flags = flags;
5314try_submit_last:
5315 if (cache->flags & FIEMAP_EXTENT_LAST) {
5316 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
5317 cache->phys, cache->len, cache->flags);
5318 cache->cached = false;
5319 }
5320 return ret;
5321}
5322
5323/*
Qu Wenruo848c23b2017-06-22 10:01:21 +08005324 * Emit last fiemap cache
Qu Wenruo47518322017-04-07 10:43:15 +08005325 *
Qu Wenruo848c23b2017-06-22 10:01:21 +08005326 * The last fiemap cache may still be cached in the following case:
5327 * 0 4k 8k
5328 * |<- Fiemap range ->|
5329 * |<------------ First extent ----------->|
5330 *
5331 * In this case, the first extent range will be cached but not emitted.
5332 * So we must emit it before ending extent_fiemap().
Qu Wenruo47518322017-04-07 10:43:15 +08005333 */
David Sterba5c5aff92019-03-20 11:29:46 +01005334static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
Qu Wenruo848c23b2017-06-22 10:01:21 +08005335 struct fiemap_cache *cache)
Qu Wenruo47518322017-04-07 10:43:15 +08005336{
5337 int ret;
5338
5339 if (!cache->cached)
5340 return 0;
5341
Qu Wenruo47518322017-04-07 10:43:15 +08005342 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5343 cache->len, cache->flags);
5344 cache->cached = false;
5345 if (ret > 0)
5346 ret = 0;
5347 return ret;
5348}
5349
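The merge rule and the deferred flush of the last cached entry are easiest to follow with concrete numbers. The sketch below reimplements the caching behaviour described above in plain user-space C with a simplified cache; the extents, flag value and demo_* names are made up, and the FIEMAP_EXTENT_LAST special case is omitted for brevity:

#include <stdbool.h>
#include <stdio.h>

struct demo_cache {
	unsigned long long offset, phys, len;
	unsigned int flags;
	bool cached;
};

/* Stand-in for fiemap_fill_next_extent(): just print the emitted extent. */
static void emit(const struct demo_cache *c)
{
	printf("extent: logical %llu phys %llu len %llu flags 0x%x\n",
	       c->offset, c->phys, c->len, c->flags);
}

/*
 * Same merge rule as emit_fiemap_extent(): logical and physical ranges must
 * both be contiguous and the flags must match, otherwise the cached entry is
 * emitted and the new one takes its place.
 */
static void submit(struct demo_cache *c, unsigned long long offset,
		   unsigned long long phys, unsigned long long len,
		   unsigned int flags)
{
	if (c->cached && c->offset + c->len == offset &&
	    c->phys + c->len == phys && c->flags == flags) {
		c->len += len;
		return;
	}
	if (c->cached)
		emit(c);
	c->offset = offset;
	c->phys = phys;
	c->len = len;
	c->flags = flags;
	c->cached = true;
}

int main(void)
{
	struct demo_cache c = { .cached = false };

	submit(&c, 0, 1048576, 4096, 0x8);	/* cached, nothing emitted yet */
	submit(&c, 4096, 1052672, 4096, 0x8);	/* contiguous: merges, len = 8192 */
	submit(&c, 8192, 2097152, 4096, 0x8);	/* phys jump: first extent emitted */
	if (c.cached)
		emit(&c);			/* emit_last_fiemap_cache() equivalent */
	return 0;
}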
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005350int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
David Sterbabab16e22020-06-23 20:56:12 +02005351 u64 start, u64 len)
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005352{
Josef Bacik975f84f2010-11-23 19:36:57 +00005353 int ret = 0;
Boris Burkov15c77452021-04-06 15:31:18 -07005354 u64 off;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005355 u64 max = start + len;
5356 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00005357 u32 found_type;
5358 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05005359 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005360 u64 disko = 0;
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005361 u64 isize = i_size_read(&inode->vfs_inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00005362 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005363 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00005364 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00005365 struct btrfs_path *path;
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005366 struct btrfs_root *root = inode->root;
Qu Wenruo47518322017-04-07 10:43:15 +08005367 struct fiemap_cache cache = { 0 };
David Sterba5911c8f2019-05-15 15:31:04 +02005368 struct ulist *roots;
5369 struct ulist *tmp_ulist;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005370 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05005371 u64 em_start = 0;
5372 u64 em_len = 0;
5373 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005374
5375 if (len == 0)
5376 return -EINVAL;
5377
Josef Bacik975f84f2010-11-23 19:36:57 +00005378 path = btrfs_alloc_path();
5379 if (!path)
5380 return -ENOMEM;
Josef Bacik975f84f2010-11-23 19:36:57 +00005381
David Sterba5911c8f2019-05-15 15:31:04 +02005382 roots = ulist_alloc(GFP_KERNEL);
5383 tmp_ulist = ulist_alloc(GFP_KERNEL);
5384 if (!roots || !tmp_ulist) {
5385 ret = -ENOMEM;
5386 goto out_free_ulist;
5387 }
5388
Boris Burkov15c77452021-04-06 15:31:18 -07005389 /*
5390 * We can't initialize that to 'start' as this could miss extents due
5391 * to extent item merging
5392 */
5393 off = 0;
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005394 start = round_down(start, btrfs_inode_sectorsize(inode));
5395 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
Josef Bacik4d479cf2011-11-17 11:34:31 -05005396
Chris Masonec29ed52011-02-23 16:23:20 -05005397 /*
5398 * lookup the last file extent. We're not using i_size here
5399 * because there might be preallocation past i_size
5400 */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005401 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
5402 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00005403 if (ret < 0) {
David Sterba5911c8f2019-05-15 15:31:04 +02005404 goto out_free_ulist;
Liu Bo2d324f52016-05-17 17:21:48 -07005405 } else {
5406 WARN_ON(!ret);
5407 if (ret == 1)
5408 ret = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00005409 }
Liu Bo2d324f52016-05-17 17:21:48 -07005410
Josef Bacik975f84f2010-11-23 19:36:57 +00005411 path->slots[0]--;
Josef Bacik975f84f2010-11-23 19:36:57 +00005412 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +02005413 found_type = found_key.type;
Josef Bacik975f84f2010-11-23 19:36:57 +00005414
Chris Masonec29ed52011-02-23 16:23:20 -05005415 /* No extents, but there might be delalloc bits */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005416 if (found_key.objectid != btrfs_ino(inode) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00005417 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05005418 /* have to trust i_size as the end */
5419 last = (u64)-1;
5420 last_for_get_extent = isize;
5421 } else {
5422 /*
5423 * remember the start of the last extent. There are a
5424 * bunch of different factors that go into the length of the
5425 * extent, so its much less complex to remember where it started
5426 */
5427 last = found_key.offset;
5428 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00005429 }
Liu Bofe09e162013-09-22 12:54:23 +08005430 btrfs_release_path(path);
Josef Bacik975f84f2010-11-23 19:36:57 +00005431
Chris Masonec29ed52011-02-23 16:23:20 -05005432 /*
5433 * we might have some extents allocated but more delalloc past those
5434 * extents. So we trust isize unless the start of the last extent is
5435 * beyond isize
5436 */
5437 if (last < isize) {
5438 last = (u64)-1;
5439 last_for_get_extent = isize;
5440 }
5441
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005442 lock_extent_bits(&inode->io_tree, start, start + len - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01005443 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05005444
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005445 em = get_extent_skip_holes(inode, start, last_for_get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005446 if (!em)
5447 goto out;
5448 if (IS_ERR(em)) {
5449 ret = PTR_ERR(em);
5450 goto out;
5451 }
Josef Bacik975f84f2010-11-23 19:36:57 +00005452
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005453 while (!end) {
Josef Bacikb76bb702013-07-05 13:52:51 -04005454 u64 offset_in_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005455
Chris Masonea8efc72011-03-08 11:54:40 -05005456 /* break if the extent we found is outside the range */
5457 if (em->start >= max || extent_map_end(em) < off)
5458 break;
5459
5460 /*
5461 * get_extent may return an extent that starts before our
5462 * requested range. We have to make sure the ranges
5463 * we return to fiemap always move forward and don't
5464 * overlap, so adjust the offsets here
5465 */
5466 em_start = max(em->start, off);
5467
5468 /*
5469 * record the offset from the start of the extent
Josef Bacikb76bb702013-07-05 13:52:51 -04005470 * for adjusting the disk offset below. Only do this if the
5471 * extent isn't compressed since our in ram offset may be past
5472 * what we have actually allocated on disk.
Chris Masonea8efc72011-03-08 11:54:40 -05005473 */
Josef Bacikb76bb702013-07-05 13:52:51 -04005474 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5475 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05005476 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05005477 em_len = em_end - em_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005478 flags = 0;
Filipe Mananaf0986312018-06-20 10:02:30 +01005479 if (em->block_start < EXTENT_MAP_LAST_BYTE)
5480 disko = em->block_start + offset_in_extent;
5481 else
5482 disko = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005483
Chris Masonea8efc72011-03-08 11:54:40 -05005484 /*
5485 * bump off for our next call to get_extent
5486 */
5487 off = extent_map_end(em);
5488 if (off >= max)
5489 end = 1;
5490
Heiko Carstens93dbfad2009-04-03 10:33:45 -04005491 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005492 end = 1;
5493 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04005494 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005495 flags |= (FIEMAP_EXTENT_DATA_INLINE |
5496 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04005497 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005498 flags |= (FIEMAP_EXTENT_DELALLOC |
5499 FIEMAP_EXTENT_UNKNOWN);
Josef Bacikdc046b12014-09-10 16:20:45 -04005500 } else if (fieinfo->fi_extents_max) {
5501 u64 bytenr = em->block_start -
5502 (em->start - em->orig_start);
Liu Bofe09e162013-09-22 12:54:23 +08005503
Liu Bofe09e162013-09-22 12:54:23 +08005504 /*
5505 * As btrfs supports shared space, this information
5506 * can be exported to userspace tools via
Josef Bacikdc046b12014-09-10 16:20:45 -04005507 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
5508 * then we're just getting a count and we can skip the
5509 * lookup stuff.
Liu Bofe09e162013-09-22 12:54:23 +08005510 */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005511 ret = btrfs_check_shared(root, btrfs_ino(inode),
David Sterba5911c8f2019-05-15 15:31:04 +02005512 bytenr, roots, tmp_ulist);
Josef Bacikdc046b12014-09-10 16:20:45 -04005513 if (ret < 0)
Liu Bofe09e162013-09-22 12:54:23 +08005514 goto out_free;
Josef Bacikdc046b12014-09-10 16:20:45 -04005515 if (ret)
Liu Bofe09e162013-09-22 12:54:23 +08005516 flags |= FIEMAP_EXTENT_SHARED;
Josef Bacikdc046b12014-09-10 16:20:45 -04005517 ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005518 }
5519 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5520 flags |= FIEMAP_EXTENT_ENCODED;
Josef Bacik0d2b2372015-05-19 10:44:04 -04005521 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5522 flags |= FIEMAP_EXTENT_UNWRITTEN;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005523
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005524 free_extent_map(em);
5525 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05005526 if ((em_start >= last) || em_len == (u64)-1 ||
5527 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005528 flags |= FIEMAP_EXTENT_LAST;
5529 end = 1;
5530 }
5531
Chris Masonec29ed52011-02-23 16:23:20 -05005532 /* now scan forward to see if this is really the last extent. */
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005533 em = get_extent_skip_holes(inode, off, last_for_get_extent);
Chris Masonec29ed52011-02-23 16:23:20 -05005534 if (IS_ERR(em)) {
5535 ret = PTR_ERR(em);
5536 goto out;
5537 }
5538 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00005539 flags |= FIEMAP_EXTENT_LAST;
5540 end = 1;
5541 }
Qu Wenruo47518322017-04-07 10:43:15 +08005542 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
5543 em_len, flags);
Chengyu Song26e726a2015-03-24 18:12:56 -04005544 if (ret) {
5545 if (ret == 1)
5546 ret = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05005547 goto out_free;
Chengyu Song26e726a2015-03-24 18:12:56 -04005548 }
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005549 }
5550out_free:
Qu Wenruo47518322017-04-07 10:43:15 +08005551 if (!ret)
David Sterba5c5aff92019-03-20 11:29:46 +01005552 ret = emit_last_fiemap_cache(fieinfo, &cache);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005553 free_extent_map(em);
5554out:
Nikolay Borisovfacee0a2020-08-31 14:42:49 +03005555 unlock_extent_cached(&inode->io_tree, start, start + len - 1,
David Sterbae43bbe52017-12-12 21:43:52 +01005556 &cached_state);
David Sterba5911c8f2019-05-15 15:31:04 +02005557
5558out_free_ulist:
Colin Ian Kinge02d48e2019-07-05 08:26:24 +01005559 btrfs_free_path(path);
David Sterba5911c8f2019-05-15 15:31:04 +02005560 ulist_free(roots);
5561 ulist_free(tmp_ulist);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05005562 return ret;
5563}
5564
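One detail of extent_fiemap() that is worth pinning down with numbers is the initial rounding of the requested range (round_down() of start, round_up() of start + len). A throwaway sketch of that arithmetic, assuming a 4K sector size and made-up offsets:

#include <stdio.h>

#define DEMO_ALIGN_DOWN(x, a)	((x) & ~((unsigned long long)(a) - 1))
#define DEMO_ALIGN_UP(x, a)	DEMO_ALIGN_DOWN((x) + (a) - 1, a)

int main(void)
{
	unsigned long long sectorsize = 4096;
	unsigned long long start = 5000, len = 3000;
	unsigned long long max = start + len;		/* 8000 */

	start = DEMO_ALIGN_DOWN(start, sectorsize);	/* 4096 */
	len = DEMO_ALIGN_UP(max, sectorsize) - start;	/* 8192 - 4096 = 4096 */
	printf("locked range: [%llu, %llu]\n", start, start + len - 1);
	return 0;
}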
Chris Mason727011e2010-08-06 13:21:20 -04005565static void __free_extent_buffer(struct extent_buffer *eb)
5566{
Chris Mason727011e2010-08-06 13:21:20 -04005567 kmem_cache_free(extent_buffer_cache, eb);
5568}
5569
David Sterba2b489662020-04-29 03:04:10 +02005570int extent_buffer_under_io(const struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005571{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005572 return (atomic_read(&eb->io_pages) ||
5573 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
5574 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05005575}
5576
Qu Wenruo8ff84662021-01-26 16:33:50 +08005577static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
Miao Xie897ca6e92010-10-26 20:57:29 -04005578{
Qu Wenruo8ff84662021-01-26 16:33:50 +08005579 struct btrfs_subpage *subpage;
Miao Xie897ca6e92010-10-26 20:57:29 -04005580
Qu Wenruo8ff84662021-01-26 16:33:50 +08005581 lockdep_assert_held(&page->mapping->private_lock);
Miao Xie897ca6e92010-10-26 20:57:29 -04005582
Qu Wenruo8ff84662021-01-26 16:33:50 +08005583 if (PagePrivate(page)) {
5584 subpage = (struct btrfs_subpage *)page->private;
5585 if (atomic_read(&subpage->eb_refs))
5586 return true;
Qu Wenruo3d078ef2021-06-07 17:02:58 +08005587 /*
5588 * Even if there are no eb refs here, we may still have an
5589 * end_page_read() call relying on page::private.
5590 */
5591 if (atomic_read(&subpage->readers))
5592 return true;
Qu Wenruo8ff84662021-01-26 16:33:50 +08005593 }
5594 return false;
5595}
Miao Xie897ca6e92010-10-26 20:57:29 -04005596
Qu Wenruo8ff84662021-01-26 16:33:50 +08005597static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
5598{
5599 struct btrfs_fs_info *fs_info = eb->fs_info;
5600 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5601
5602 /*
5603 * For mapped eb, we're going to change the page private, which should
5604 * be done under the private_lock.
5605 */
5606 if (mapped)
5607 spin_lock(&page->mapping->private_lock);
5608
5609 if (!PagePrivate(page)) {
Forrest Liu5d2361d2015-02-09 17:31:45 +08005610 if (mapped)
Qu Wenruo8ff84662021-01-26 16:33:50 +08005611 spin_unlock(&page->mapping->private_lock);
5612 return;
5613 }
5614
5615 if (fs_info->sectorsize == PAGE_SIZE) {
Forrest Liu5d2361d2015-02-09 17:31:45 +08005616 /*
5617 * We do this since we'll remove the pages after we've
5618 * removed the eb from the radix tree, so we could race
5619 * and have this page now attached to the new eb. So
5620 * only clear page_private if it's still connected to
5621 * this eb.
5622 */
5623 if (PagePrivate(page) &&
5624 page->private == (unsigned long)eb) {
5625 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5626 BUG_ON(PageDirty(page));
5627 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005628 /*
Forrest Liu5d2361d2015-02-09 17:31:45 +08005629 * We need to make sure we haven't been attached
5630 * to a new eb.
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005631 */
Guoqing Jiangd1b89bc2020-06-01 21:47:45 -07005632 detach_page_private(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05005633 }
Forrest Liu5d2361d2015-02-09 17:31:45 +08005634 if (mapped)
5635 spin_unlock(&page->mapping->private_lock);
Qu Wenruo8ff84662021-01-26 16:33:50 +08005636 return;
5637 }
5638
5639 /*
5640 * For subpage, we can have dummy eb with page private. In this case,
5641 * we can directly detach the private as such page is only attached to
5642 * one dummy eb, no sharing.
5643 */
5644 if (!mapped) {
5645 btrfs_detach_subpage(fs_info, page);
5646 return;
5647 }
5648
5649 btrfs_page_dec_eb_refs(fs_info, page);
5650
5651 /*
5652 * We can only detach the page private if there are no other ebs in the
Qu Wenruo3d078ef2021-06-07 17:02:58 +08005653 * page range and no unfinished IO.
Qu Wenruo8ff84662021-01-26 16:33:50 +08005654 */
5655 if (!page_range_has_eb(fs_info, page))
5656 btrfs_detach_subpage(fs_info, page);
5657
5658 spin_unlock(&page->mapping->private_lock);
5659}
5660
5661/* Release all pages attached to the extent buffer */
5662static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
5663{
5664 int i;
5665 int num_pages;
5666
5667 ASSERT(!extent_buffer_under_io(eb));
5668
5669 num_pages = num_extent_pages(eb);
5670 for (i = 0; i < num_pages; i++) {
5671 struct page *page = eb->pages[i];
5672
5673 if (!page)
5674 continue;
5675
5676 detach_extent_buffer_page(eb, page);
Forrest Liu5d2361d2015-02-09 17:31:45 +08005677
Nicholas D Steeves01327612016-05-19 21:18:45 -04005678 /* One for when we allocated the page */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005679 put_page(page);
Nikolay Borisovd64766f2018-06-27 16:38:22 +03005680 }
Miao Xie897ca6e92010-10-26 20:57:29 -04005681}
5682
5683/*
5684 * Helper for releasing the extent buffer.
5685 */
5686static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
5687{
David Sterba55ac0132018-07-19 17:24:32 +02005688 btrfs_release_extent_buffer_pages(eb);
Josef Bacik8c389382020-02-14 16:11:42 -05005689 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
Miao Xie897ca6e92010-10-26 20:57:29 -04005690 __free_extent_buffer(eb);
5691}
5692
Josef Bacikf28491e2013-12-16 13:24:27 -05005693static struct extent_buffer *
5694__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
David Sterba23d79d82014-06-15 02:55:29 +02005695 unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04005696{
5697 struct extent_buffer *eb = NULL;
5698
Michal Hockod1b5c562015-08-19 14:17:40 +02005699 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005700 eb->start = start;
5701 eb->len = len;
Josef Bacikf28491e2013-12-16 13:24:27 -05005702 eb->fs_info = fs_info;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005703 eb->bflags = 0;
Josef Bacik196d59a2020-08-20 11:46:09 -04005704 init_rwsem(&eb->lock);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005705
Josef Bacik3fd63722020-02-14 16:11:40 -05005706 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
5707 &fs_info->allocated_ebs);
Naohiro Aotad35751562021-02-04 19:21:54 +09005708 INIT_LIST_HEAD(&eb->release_list);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005709
5710 spin_lock_init(&eb->refs_lock);
5711 atomic_set(&eb->refs, 1);
5712 atomic_set(&eb->io_pages, 0);
5713
Qu Wenruodeb67892020-12-02 14:48:01 +08005714 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005715
5716 return eb;
5717}
5718
David Sterba2b489662020-04-29 03:04:10 +02005719struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
Josef Bacikdb7f3432013-08-07 14:54:37 -04005720{
David Sterbacc5e31a2018-03-01 18:20:27 +01005721 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005722 struct page *p;
5723 struct extent_buffer *new;
David Sterbacc5e31a2018-03-01 18:20:27 +01005724 int num_pages = num_extent_pages(src);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005725
David Sterba3f556f72014-06-15 03:20:26 +02005726 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005727 if (new == NULL)
5728 return NULL;
5729
Qu Wenruo62c053f2021-01-26 16:33:46 +08005730 /*
5731 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
5732 * btrfs_release_extent_buffer() have different behavior for
5733 * UNMAPPED subpage extent buffer.
5734 */
5735 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5736
Josef Bacikdb7f3432013-08-07 14:54:37 -04005737 for (i = 0; i < num_pages; i++) {
Qu Wenruo760f9912021-01-26 16:33:48 +08005738 int ret;
5739
Josef Bacik9ec72672013-08-07 16:57:23 -04005740 p = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005741 if (!p) {
5742 btrfs_release_extent_buffer(new);
5743 return NULL;
5744 }
Qu Wenruo760f9912021-01-26 16:33:48 +08005745 ret = attach_extent_buffer_page(new, p, NULL);
5746 if (ret < 0) {
5747 put_page(p);
5748 btrfs_release_extent_buffer(new);
5749 return NULL;
5750 }
Josef Bacikdb7f3432013-08-07 14:54:37 -04005751 WARN_ON(PageDirty(p));
Josef Bacikdb7f3432013-08-07 14:54:37 -04005752 new->pages[i] = p;
David Sterbafba1acf2016-11-08 17:56:24 +01005753 copy_page(page_address(p), page_address(src->pages[i]));
Josef Bacikdb7f3432013-08-07 14:54:37 -04005754 }
Qu Wenruo92d83e92021-01-26 16:33:55 +08005755 set_extent_buffer_uptodate(new);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005756
5757 return new;
5758}
5759
Omar Sandoval0f331222015-09-29 20:50:31 -07005760struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5761 u64 start, unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04005762{
5763 struct extent_buffer *eb;
David Sterbacc5e31a2018-03-01 18:20:27 +01005764 int num_pages;
5765 int i;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005766
David Sterba3f556f72014-06-15 03:20:26 +02005767 eb = __alloc_extent_buffer(fs_info, start, len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005768 if (!eb)
5769 return NULL;
5770
David Sterba65ad0102018-06-29 10:56:49 +02005771 num_pages = num_extent_pages(eb);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005772 for (i = 0; i < num_pages; i++) {
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005773 int ret;
5774
Josef Bacik9ec72672013-08-07 16:57:23 -04005775 eb->pages[i] = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005776 if (!eb->pages[i])
5777 goto err;
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005778 ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
5779 if (ret < 0)
5780 goto err;
Josef Bacikdb7f3432013-08-07 14:54:37 -04005781 }
5782 set_extent_buffer_uptodate(eb);
5783 btrfs_set_header_nritems(eb, 0);
Nikolay Borisovb0132a32018-06-27 16:38:24 +03005784 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005785
5786 return eb;
5787err:
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005788 for (; i > 0; i--) {
5789 detach_extent_buffer_page(eb, eb->pages[i - 1]);
Josef Bacikdb7f3432013-08-07 14:54:37 -04005790 __free_page(eb->pages[i - 1]);
Qu Wenruo09bc1f02021-01-26 16:33:51 +08005791 }
Josef Bacikdb7f3432013-08-07 14:54:37 -04005792 __free_extent_buffer(eb);
5793 return NULL;
5794}
5795
Omar Sandoval0f331222015-09-29 20:50:31 -07005796struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04005797 u64 start)
Omar Sandoval0f331222015-09-29 20:50:31 -07005798{
Jeff Mahoneyda170662016-06-15 09:22:56 -04005799 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
Omar Sandoval0f331222015-09-29 20:50:31 -07005800}
5801
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005802static void check_buffer_tree_ref(struct extent_buffer *eb)
5803{
Chris Mason242e18c2013-01-29 17:49:37 -05005804 int refs;
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005805 /*
5806 * The TREE_REF bit is first set when the extent_buffer is added
5807 * to the radix tree. It is also reset, if unset, when a new reference
5808 * is created by find_extent_buffer.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005809 *
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005810 * It is only cleared in two cases: freeing the last non-tree
5811 * reference to the extent_buffer when its STALE bit is set or
5812 * calling releasepage when the tree reference is the only reference.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005813 *
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005814 * In both cases, care is taken to ensure that the extent_buffer's
5815 * pages are not under io. However, releasepage can be concurrently
5816 * called with creating new references, which is prone to race
5817 * conditions between the calls to check_buffer_tree_ref in those
5818 * codepaths and clearing TREE_REF in try_release_extent_buffer.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005819 *
Boris Burkov6bf9cd22020-06-17 11:35:19 -07005820 * The actual lifetime of the extent_buffer in the radix tree is
5821 * adequately protected by the refcount, but the TREE_REF bit and
5822 * its corresponding reference are not. To protect against this
5823 * class of races, we call check_buffer_tree_ref from the codepaths
5824 * which trigger io after they set eb->io_pages. Note that once io is
5825 * initiated, TREE_REF can no longer be cleared, so that is the
5826 * moment at which any such race is best fixed.
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005827 */
Chris Mason242e18c2013-01-29 17:49:37 -05005828 refs = atomic_read(&eb->refs);
5829 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5830 return;
5831
Josef Bacik594831c2012-07-20 16:11:08 -04005832 spin_lock(&eb->refs_lock);
5833 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005834 atomic_inc(&eb->refs);
Josef Bacik594831c2012-07-20 16:11:08 -04005835 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005836}
5837
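The shape of check_buffer_tree_ref(), a cheap unlocked check followed by a locked test-and-set that bumps the refcount at most once, is a general pattern. Below is a user-space approximation using C11 atomics and a pthread spinlock; struct demo_buf and demo_check_tree_ref() are invented for illustration and leave out lockdep and all extent buffer specifics:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct demo_buf {
	atomic_int refs;
	atomic_bool tree_ref;
	pthread_spinlock_t refs_lock;
};

/* Make sure the tree's reference is accounted for exactly once. */
static void demo_check_tree_ref(struct demo_buf *b)
{
	/*
	 * Fast path: the tree ref is already set and at least one other
	 * reference exists, so there is nothing to fix up.
	 */
	if (atomic_load(&b->refs) >= 2 && atomic_load(&b->tree_ref))
		return;

	pthread_spin_lock(&b->refs_lock);
	/* test_and_set_bit() equivalent: only the first setter bumps refs. */
	if (!atomic_exchange(&b->tree_ref, true))
		atomic_fetch_add(&b->refs, 1);
	pthread_spin_unlock(&b->refs_lock);
}

int main(void)
{
	struct demo_buf b = { .refs = 1, .tree_ref = false };

	pthread_spin_init(&b.refs_lock, PTHREAD_PROCESS_PRIVATE);
	demo_check_tree_ref(&b);	/* refs becomes 2, tree_ref set */
	demo_check_tree_ref(&b);	/* fast path, no change */
	pthread_spin_destroy(&b.refs_lock);
	return 0;
}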
Mel Gorman2457aec2014-06-04 16:10:31 -07005838static void mark_extent_buffer_accessed(struct extent_buffer *eb,
5839 struct page *accessed)
Josef Bacik5df42352012-03-15 18:24:42 -04005840{
David Sterbacc5e31a2018-03-01 18:20:27 +01005841 int num_pages, i;
Josef Bacik5df42352012-03-15 18:24:42 -04005842
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005843 check_buffer_tree_ref(eb);
5844
David Sterba65ad0102018-06-29 10:56:49 +02005845 num_pages = num_extent_pages(eb);
Josef Bacik5df42352012-03-15 18:24:42 -04005846 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005847 struct page *p = eb->pages[i];
5848
Mel Gorman2457aec2014-06-04 16:10:31 -07005849 if (p != accessed)
5850 mark_page_accessed(p);
Josef Bacik5df42352012-03-15 18:24:42 -04005851 }
5852}
5853
Josef Bacikf28491e2013-12-16 13:24:27 -05005854struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5855 u64 start)
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005856{
5857 struct extent_buffer *eb;
5858
Qu Wenruo2f3186d2021-04-06 08:36:00 +08005859 eb = find_extent_buffer_nolock(fs_info, start);
5860 if (!eb)
5861 return NULL;
5862 /*
5863 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
5864 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
5865 * another task running free_extent_buffer() might have seen that flag
5866 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
5867 * writeback flags not set) and it's still in the tree (flag
5868 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
5869 * decrementing the extent buffer's reference count twice. So here we
5870 * could race and increment the eb's reference count, clear its stale
5871 * flag, mark it as dirty and drop our reference before the other task
5872 * finishes executing free_extent_buffer, which would later result in
5873 * an attempt to free an extent buffer that is dirty.
5874 */
5875 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5876 spin_lock(&eb->refs_lock);
5877 spin_unlock(&eb->refs_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005878 }
Qu Wenruo2f3186d2021-04-06 08:36:00 +08005879 mark_extent_buffer_accessed(eb, NULL);
5880 return eb;
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005881}
5882
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005883#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5884struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
Jeff Mahoneyda170662016-06-15 09:22:56 -04005885 u64 start)
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005886{
5887 struct extent_buffer *eb, *exists = NULL;
5888 int ret;
5889
5890 eb = find_extent_buffer(fs_info, start);
5891 if (eb)
5892 return eb;
Jeff Mahoneyda170662016-06-15 09:22:56 -04005893 eb = alloc_dummy_extent_buffer(fs_info, start);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005894 if (!eb)
Dan Carpenterb6293c82019-12-03 14:24:58 +03005895 return ERR_PTR(-ENOMEM);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005896 eb->fs_info = fs_info;
5897again:
David Sterbae1860a72016-05-09 14:11:38 +02005898 ret = radix_tree_preload(GFP_NOFS);
Dan Carpenterb6293c82019-12-03 14:24:58 +03005899 if (ret) {
5900 exists = ERR_PTR(ret);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005901 goto free_eb;
Dan Carpenterb6293c82019-12-03 14:24:58 +03005902 }
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005903 spin_lock(&fs_info->buffer_lock);
5904 ret = radix_tree_insert(&fs_info->buffer_radix,
Qu Wenruo478ef882020-10-21 14:25:05 +08005905 start >> fs_info->sectorsize_bits, eb);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005906 spin_unlock(&fs_info->buffer_lock);
5907 radix_tree_preload_end();
5908 if (ret == -EEXIST) {
5909 exists = find_extent_buffer(fs_info, start);
5910 if (exists)
5911 goto free_eb;
5912 else
5913 goto again;
5914 }
5915 check_buffer_tree_ref(eb);
5916 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5917
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04005918 return eb;
5919free_eb:
5920 btrfs_release_extent_buffer(eb);
5921 return exists;
5922}
5923#endif
5924
Qu Wenruo819822102021-01-26 16:33:49 +08005925static struct extent_buffer *grab_extent_buffer(
5926 struct btrfs_fs_info *fs_info, struct page *page)
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08005927{
5928 struct extent_buffer *exists;
5929
Qu Wenruo819822102021-01-26 16:33:49 +08005930 /*
5931 * For subpage case, we completely rely on radix tree to ensure we
5932 * don't try to insert two ebs for the same bytenr. So here we always
5933 * return NULL and just continue.
5934 */
5935 if (fs_info->sectorsize < PAGE_SIZE)
5936 return NULL;
5937
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08005938 /* Page not yet attached to an extent buffer */
5939 if (!PagePrivate(page))
5940 return NULL;
5941
5942 /*
5943 * We could have already allocated an eb for this page and attached one
5944 * so let's see if we can get a ref on the existing eb, and if we can we
5945 * know it's good and we can just return that one, else we know we can
5946 * just overwrite page->private.
5947 */
5948 exists = (struct extent_buffer *)page->private;
5949 if (atomic_inc_not_zero(&exists->refs))
5950 return exists;
5951
5952 WARN_ON(PageDirty(page));
5953 detach_page_private(page);
5954 return NULL;
5955}
5956
Josef Bacikf28491e2013-12-16 13:24:27 -05005957struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
Josef Bacik3fbaf252020-11-05 10:45:20 -05005958 u64 start, u64 owner_root, int level)
Chris Masond1310b22008-01-24 16:13:08 -05005959{
Jeff Mahoneyda170662016-06-15 09:22:56 -04005960 unsigned long len = fs_info->nodesize;
David Sterbacc5e31a2018-03-01 18:20:27 +01005961 int num_pages;
5962 int i;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03005963 unsigned long index = start >> PAGE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005964 struct extent_buffer *eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005965 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05005966 struct page *p;
Josef Bacikf28491e2013-12-16 13:24:27 -05005967 struct address_space *mapping = fs_info->btree_inode->i_mapping;
Chris Masond1310b22008-01-24 16:13:08 -05005968 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04005969 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05005970
Jeff Mahoneyda170662016-06-15 09:22:56 -04005971 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
Liu Boc871b0f2016-06-06 12:01:23 -07005972 btrfs_err(fs_info, "bad tree block start %llu", start);
5973 return ERR_PTR(-EINVAL);
5974 }
5975
Qu Wenruoe9306ad2021-02-25 09:18:14 +08005976#if BITS_PER_LONG == 32
5977 if (start >= MAX_LFS_FILESIZE) {
5978 btrfs_err_rl(fs_info,
5979 "extent buffer %llu is beyond 32bit page cache limit", start);
5980 btrfs_err_32bit_limit(fs_info);
5981 return ERR_PTR(-EOVERFLOW);
5982 }
5983 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
5984 btrfs_warn_32bit_limit(fs_info);
5985#endif
5986
Qu Wenruo1aaac382020-12-02 14:48:02 +08005987 if (fs_info->sectorsize < PAGE_SIZE &&
5988 offset_in_page(start) + len > PAGE_SIZE) {
5989 btrfs_err(fs_info,
5990 "tree block crosses page boundary, start %llu nodesize %lu",
5991 start, len);
5992 return ERR_PTR(-EINVAL);
5993 }
5994
Josef Bacikf28491e2013-12-16 13:24:27 -05005995 eb = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05005996 if (eb)
Chris Mason6af118ce2008-07-22 11:18:07 -04005997 return eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04005998
David Sterba23d79d82014-06-15 02:55:29 +02005999 eb = __alloc_extent_buffer(fs_info, start, len);
Peter2b114d12008-04-01 11:21:40 -04006000 if (!eb)
Liu Boc871b0f2016-06-06 12:01:23 -07006001 return ERR_PTR(-ENOMEM);
Josef Bacike114c542020-11-05 10:45:21 -05006002 btrfs_set_buffer_lockdep_class(owner_root, eb, level);
Chris Masond1310b22008-01-24 16:13:08 -05006003
David Sterba65ad0102018-06-29 10:56:49 +02006004 num_pages = num_extent_pages(eb);
Chris Mason727011e2010-08-06 13:21:20 -04006005 for (i = 0; i < num_pages; i++, index++) {
Qu Wenruo760f9912021-01-26 16:33:48 +08006006 struct btrfs_subpage *prealloc = NULL;
6007
Michal Hockod1b5c562015-08-19 14:17:40 +02006008 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
Liu Boc871b0f2016-06-06 12:01:23 -07006009 if (!p) {
6010 exists = ERR_PTR(-ENOMEM);
Chris Mason6af118ce2008-07-22 11:18:07 -04006011 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07006012 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05006013
Qu Wenruo760f9912021-01-26 16:33:48 +08006014 /*
6015 * Preallocate page->private for subpage case, so that we won't
6016 * allocate memory with private_lock held. The memory will be
6017 * freed by attach_extent_buffer_page() or freed manually if
6018 * we exit earlier.
6019 *
6020 * Although we have ensured one subpage eb can only have one
6021 * page, this may change in the future with 16K page size
6022 * support, so we still preallocate the memory in the loop.
6023 */
6024 ret = btrfs_alloc_subpage(fs_info, &prealloc,
6025 BTRFS_SUBPAGE_METADATA);
6026 if (ret < 0) {
6027 unlock_page(p);
6028 put_page(p);
6029 exists = ERR_PTR(ret);
6030 goto free_eb;
6031 }
6032
Josef Bacik4f2de97a2012-03-07 16:20:05 -05006033 spin_lock(&mapping->private_lock);
Qu Wenruo819822102021-01-26 16:33:49 +08006034 exists = grab_extent_buffer(fs_info, p);
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08006035 if (exists) {
6036 spin_unlock(&mapping->private_lock);
6037 unlock_page(p);
6038 put_page(p);
6039 mark_extent_buffer_accessed(exists, p);
Qu Wenruo760f9912021-01-26 16:33:48 +08006040 btrfs_free_subpage(prealloc);
Qu Wenruoc0f0a9e2021-01-06 09:01:45 +08006041 goto free_eb;
Chris Masond1310b22008-01-24 16:13:08 -05006042 }
Qu Wenruo760f9912021-01-26 16:33:48 +08006043 /* Should not fail, as we have preallocated the memory */
6044 ret = attach_extent_buffer_page(eb, p, prealloc);
6045 ASSERT(!ret);
Qu Wenruo8ff84662021-01-26 16:33:50 +08006046 /*
6047 * Inform detach_extent_buffer_page() that we have an extra eb under
6048 * allocation, so that it won't release the page private when the eb
6049 * hasn't yet been inserted into the radix tree.
6050 *
6051 * The ref will be decreased when the eb releases the page, in
6052 * detach_extent_buffer_page().
6053 * Thus it needs no special handling in the error path.
6054 */
6055 btrfs_page_inc_eb_refs(fs_info, p);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05006056 spin_unlock(&mapping->private_lock);
Qu Wenruo760f9912021-01-26 16:33:48 +08006057
Qu Wenruo1e5eb3d2021-03-25 15:14:41 +08006058 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
Chris Mason727011e2010-08-06 13:21:20 -04006059 eb->pages[i] = p;
Chris Masond1310b22008-01-24 16:13:08 -05006060 if (!PageUptodate(p))
6061 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05006062
6063 /*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03006064 * We can't unlock the pages just yet since the extent buffer
6065 * hasn't been properly inserted in the radix tree; this
6066 * opens a race with btree_releasepage, which can free a page
6067 * while we are still filling in all pages for the buffer, and
6068 * we could crash.
Chris Masoneb14ab82011-02-10 12:35:00 -05006069 */
Chris Masond1310b22008-01-24 16:13:08 -05006070 }
6071 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05006072 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik115391d2012-03-09 09:51:43 -05006073again:
David Sterbae1860a72016-05-09 14:11:38 +02006074 ret = radix_tree_preload(GFP_NOFS);
Liu Boc871b0f2016-06-06 12:01:23 -07006075 if (ret) {
6076 exists = ERR_PTR(ret);
Miao Xie19fe0a82010-10-26 20:57:29 -04006077 goto free_eb;
Liu Boc871b0f2016-06-06 12:01:23 -07006078 }
Miao Xie19fe0a82010-10-26 20:57:29 -04006079
Josef Bacikf28491e2013-12-16 13:24:27 -05006080 spin_lock(&fs_info->buffer_lock);
6081 ret = radix_tree_insert(&fs_info->buffer_radix,
Qu Wenruo478ef882020-10-21 14:25:05 +08006082 start >> fs_info->sectorsize_bits, eb);
Josef Bacikf28491e2013-12-16 13:24:27 -05006083 spin_unlock(&fs_info->buffer_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05006084 radix_tree_preload_end();
Miao Xie19fe0a82010-10-26 20:57:29 -04006085 if (ret == -EEXIST) {
Josef Bacikf28491e2013-12-16 13:24:27 -05006086 exists = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05006087 if (exists)
6088 goto free_eb;
6089 else
Josef Bacik115391d2012-03-09 09:51:43 -05006090 goto again;
Chris Mason6af118ce2008-07-22 11:18:07 -04006091 }
Chris Mason6af118ce2008-07-22 11:18:07 -04006092 /* add one reference for the tree */
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006093 check_buffer_tree_ref(eb);
Josef Bacik34b41ac2013-12-13 10:41:51 -05006094 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
Chris Masoneb14ab82011-02-10 12:35:00 -05006095
6096	/*
Nikolay Borisovb16d0112018-07-04 10:24:52 +03006097	 * Now it's safe to unlock the pages because any calls to
6098	 * btree_releasepage will correctly detect that a page belongs to a
6099	 * live buffer and won't free it prematurely.
Chris Masoneb14ab82011-02-10 12:35:00 -05006100	 */
Nikolay Borisov28187ae2018-07-04 10:24:51 +03006101 for (i = 0; i < num_pages; i++)
6102 unlock_page(eb->pages[i]);
Chris Masond1310b22008-01-24 16:13:08 -05006103 return eb;
6104
Chris Mason6af118ce2008-07-22 11:18:07 -04006105free_eb:
Omar Sandoval5ca64f42015-02-24 02:47:05 -08006106 WARN_ON(!atomic_dec_and_test(&eb->refs));
Chris Mason727011e2010-08-06 13:21:20 -04006107 for (i = 0; i < num_pages; i++) {
6108 if (eb->pages[i])
6109 unlock_page(eb->pages[i]);
6110 }
Chris Masoneb14ab82011-02-10 12:35:00 -05006111
Miao Xie897ca6e92010-10-26 20:57:29 -04006112 btrfs_release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04006113 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05006114}
Chris Masond1310b22008-01-24 16:13:08 -05006115
Josef Bacik3083ee22012-03-09 16:01:49 -05006116static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
6117{
6118 struct extent_buffer *eb =
6119 container_of(head, struct extent_buffer, rcu_head);
6120
6121 __free_extent_buffer(eb);
6122}
6123
David Sterbaf7a52a42013-04-26 14:56:29 +00006124static int release_extent_buffer(struct extent_buffer *eb)
Jules Irenge5ce48d02020-02-23 23:16:42 +00006125 __releases(&eb->refs_lock)
Josef Bacik3083ee22012-03-09 16:01:49 -05006126{
Nikolay Borisov07e21c42018-06-27 16:38:23 +03006127 lockdep_assert_held(&eb->refs_lock);
6128
Josef Bacik3083ee22012-03-09 16:01:49 -05006129 WARN_ON(atomic_read(&eb->refs) == 0);
6130 if (atomic_dec_and_test(&eb->refs)) {
Josef Bacik34b41ac2013-12-13 10:41:51 -05006131 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
Josef Bacikf28491e2013-12-16 13:24:27 -05006132 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacik3083ee22012-03-09 16:01:49 -05006133
Jan Schmidt815a51c2012-05-16 17:00:02 +02006134 spin_unlock(&eb->refs_lock);
Josef Bacik3083ee22012-03-09 16:01:49 -05006135
Josef Bacikf28491e2013-12-16 13:24:27 -05006136 spin_lock(&fs_info->buffer_lock);
6137 radix_tree_delete(&fs_info->buffer_radix,
Qu Wenruo478ef882020-10-21 14:25:05 +08006138 eb->start >> fs_info->sectorsize_bits);
Josef Bacikf28491e2013-12-16 13:24:27 -05006139 spin_unlock(&fs_info->buffer_lock);
Josef Bacik34b41ac2013-12-13 10:41:51 -05006140 } else {
6141 spin_unlock(&eb->refs_lock);
Jan Schmidt815a51c2012-05-16 17:00:02 +02006142 }
Josef Bacik3083ee22012-03-09 16:01:49 -05006143
Josef Bacik8c389382020-02-14 16:11:42 -05006144 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
Josef Bacik3083ee22012-03-09 16:01:49 -05006145 /* Should be safe to release our pages at this point */
David Sterba55ac0132018-07-19 17:24:32 +02006146 btrfs_release_extent_buffer_pages(eb);
Josef Bacikbcb7e442015-03-16 17:38:02 -04006147#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
Nikolay Borisovb0132a32018-06-27 16:38:24 +03006148 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
Josef Bacikbcb7e442015-03-16 17:38:02 -04006149 __free_extent_buffer(eb);
6150 return 1;
6151 }
6152#endif
Josef Bacik3083ee22012-03-09 16:01:49 -05006153 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
Josef Bacike64860a2012-07-20 16:05:36 -04006154 return 1;
Josef Bacik3083ee22012-03-09 16:01:49 -05006155 }
6156 spin_unlock(&eb->refs_lock);
Josef Bacike64860a2012-07-20 16:05:36 -04006157
6158 return 0;
Josef Bacik3083ee22012-03-09 16:01:49 -05006159}
6160
Chris Masond1310b22008-01-24 16:13:08 -05006161void free_extent_buffer(struct extent_buffer *eb)
6162{
Chris Mason242e18c2013-01-29 17:49:37 -05006163 int refs;
6164 int old;
Chris Masond1310b22008-01-24 16:13:08 -05006165 if (!eb)
6166 return;
6167
Chris Mason242e18c2013-01-29 17:49:37 -05006168 while (1) {
6169 refs = atomic_read(&eb->refs);
Nikolay Borisov46cc7752018-10-15 17:04:01 +03006170 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
6171 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
6172 refs == 1))
Chris Mason242e18c2013-01-29 17:49:37 -05006173 break;
6174 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
6175 if (old == refs)
6176 return;
6177 }
6178
Josef Bacik3083ee22012-03-09 16:01:49 -05006179 spin_lock(&eb->refs_lock);
6180 if (atomic_read(&eb->refs) == 2 &&
6181 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006182 !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05006183 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6184 atomic_dec(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05006185
Josef Bacik3083ee22012-03-09 16:01:49 -05006186 /*
6187 * I know this is terrible, but it's temporary until we stop tracking
6188 * the uptodate bits and such for the extent buffers.
6189 */
David Sterbaf7a52a42013-04-26 14:56:29 +00006190 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006191}
Chris Masond1310b22008-01-24 16:13:08 -05006192
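/*
 * Illustrative sketch (not part of the original file): the usual lookup and
 * release pairing around the refcounting above. The helper name and the
 * elided body are hypothetical; find_extent_buffer() and free_extent_buffer()
 * are the real interfaces.
 */
static void example_lookup_and_release(struct btrfs_fs_info *fs_info, u64 start)
{
	struct extent_buffer *eb;

	/* Takes a reference on success, or returns NULL if not cached. */
	eb = find_extent_buffer(fs_info, start);
	if (!eb)
		return;

	/* ... read or modify the buffer here ... */

	/* Drop the reference taken by the lookup. */
	free_extent_buffer(eb);
}
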
Josef Bacik3083ee22012-03-09 16:01:49 -05006193void free_extent_buffer_stale(struct extent_buffer *eb)
6194{
6195 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05006196 return;
6197
Josef Bacik3083ee22012-03-09 16:01:49 -05006198 spin_lock(&eb->refs_lock);
6199 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
6200
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006201 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05006202 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6203 atomic_dec(&eb->refs);
David Sterbaf7a52a42013-04-26 14:56:29 +00006204 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006205}
6206
Qu Wenruo0d277972021-03-25 15:14:43 +08006207static void btree_clear_page_dirty(struct page *page)
6208{
6209 ASSERT(PageDirty(page));
6210 ASSERT(PageLocked(page));
6211 clear_page_dirty_for_io(page);
6212 xa_lock_irq(&page->mapping->i_pages);
6213 if (!PageDirty(page))
6214 __xa_clear_mark(&page->mapping->i_pages,
6215 page_index(page), PAGECACHE_TAG_DIRTY);
6216 xa_unlock_irq(&page->mapping->i_pages);
6217}
6218
6219static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
6220{
6221 struct btrfs_fs_info *fs_info = eb->fs_info;
6222 struct page *page = eb->pages[0];
6223 bool last;
6224
6225 /* btree_clear_page_dirty() needs page locked */
6226 lock_page(page);
6227 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
6228 eb->len);
6229 if (last)
6230 btree_clear_page_dirty(page);
6231 unlock_page(page);
6232 WARN_ON(atomic_read(&eb->refs) == 0);
6233}
6234
David Sterba2b489662020-04-29 03:04:10 +02006235void clear_extent_buffer_dirty(const struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05006236{
David Sterbacc5e31a2018-03-01 18:20:27 +01006237 int i;
6238 int num_pages;
Chris Masond1310b22008-01-24 16:13:08 -05006239 struct page *page;
6240
Qu Wenruo0d277972021-03-25 15:14:43 +08006241 if (eb->fs_info->sectorsize < PAGE_SIZE)
6242 return clear_subpage_extent_buffer_dirty(eb);
6243
David Sterba65ad0102018-06-29 10:56:49 +02006244 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006245
6246 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006247 page = eb->pages[i];
Chris Masonb9473432009-03-13 11:00:37 -04006248 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05006249 continue;
Chris Masona61e6f22008-07-22 11:18:08 -04006250 lock_page(page);
Qu Wenruo0d277972021-03-25 15:14:43 +08006251 btree_clear_page_dirty(page);
Chris Masonbf0da8c2011-11-04 12:29:37 -04006252 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04006253 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05006254 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006255 WARN_ON(atomic_read(&eb->refs) == 0);
Chris Masond1310b22008-01-24 16:13:08 -05006256}
Chris Masond1310b22008-01-24 16:13:08 -05006257
Liu Boabb57ef2018-09-14 01:44:42 +08006258bool set_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05006259{
David Sterbacc5e31a2018-03-01 18:20:27 +01006260 int i;
6261 int num_pages;
Liu Boabb57ef2018-09-14 01:44:42 +08006262 bool was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05006263
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006264 check_buffer_tree_ref(eb);
6265
Chris Masonb9473432009-03-13 11:00:37 -04006266 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006267
David Sterba65ad0102018-06-29 10:56:49 +02006268 num_pages = num_extent_pages(eb);
Josef Bacik3083ee22012-03-09 16:01:49 -05006269 WARN_ON(atomic_read(&eb->refs) == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006270 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
6271
Qu Wenruo0d277972021-03-25 15:14:43 +08006272 if (!was_dirty) {
6273 bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
Liu Bo51995c32018-09-14 01:46:08 +08006274
Qu Wenruo0d277972021-03-25 15:14:43 +08006275		/*
6276		 * For the subpage case, we can have other extent buffers in
6277		 * the same page, and in clear_subpage_extent_buffer_dirty()
6278		 * we have to clear the page dirty bit without the subpage
6279		 * lock held. This can cause a race where our page gets its
6280		 * dirty bit cleared right after we set it.
6281		 *
6282		 * Thankfully, clear_subpage_extent_buffer_dirty() already
6283		 * takes its page lock for other reasons, so we can use the
6284		 * page lock to prevent the above race.
6285		 */
6286 if (subpage)
6287 lock_page(eb->pages[0]);
6288 for (i = 0; i < num_pages; i++)
6289 btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
6290 eb->start, eb->len);
6291 if (subpage)
6292 unlock_page(eb->pages[0]);
6293 }
Liu Bo51995c32018-09-14 01:46:08 +08006294#ifdef CONFIG_BTRFS_DEBUG
6295 for (i = 0; i < num_pages; i++)
6296 ASSERT(PageDirty(eb->pages[i]));
6297#endif
6298
Chris Masonb9473432009-03-13 11:00:37 -04006299 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05006300}
Chris Masond1310b22008-01-24 16:13:08 -05006301
David Sterba69ba3922015-12-03 13:08:59 +01006302void clear_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Mason1259ab72008-05-12 13:39:03 -04006303{
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006304 struct btrfs_fs_info *fs_info = eb->fs_info;
Chris Mason1259ab72008-05-12 13:39:03 -04006305 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01006306 int num_pages;
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006307 int i;
Chris Mason1259ab72008-05-12 13:39:03 -04006308
Chris Masonb4ce94d2009-02-04 09:25:08 -05006309 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02006310 num_pages = num_extent_pages(eb);
Chris Mason1259ab72008-05-12 13:39:03 -04006311 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006312 page = eb->pages[i];
Chris Mason33958dc2008-07-30 10:29:12 -04006313 if (page)
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006314 btrfs_page_clear_uptodate(fs_info, page,
6315 eb->start, eb->len);
Chris Mason1259ab72008-05-12 13:39:03 -04006316 }
Chris Mason1259ab72008-05-12 13:39:03 -04006317}
6318
David Sterba09c25a82015-12-03 13:08:59 +01006319void set_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05006320{
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006321 struct btrfs_fs_info *fs_info = eb->fs_info;
Chris Masond1310b22008-01-24 16:13:08 -05006322 struct page *page;
David Sterbacc5e31a2018-03-01 18:20:27 +01006323 int num_pages;
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006324 int i;
Chris Masond1310b22008-01-24 16:13:08 -05006325
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006326 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
David Sterba65ad0102018-06-29 10:56:49 +02006327 num_pages = num_extent_pages(eb);
Chris Masond1310b22008-01-24 16:13:08 -05006328 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006329 page = eb->pages[i];
Qu Wenruo251f2ac2021-01-26 16:33:54 +08006330 btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
Chris Masond1310b22008-01-24 16:13:08 -05006331 }
Chris Masond1310b22008-01-24 16:13:08 -05006332}
Chris Masond1310b22008-01-24 16:13:08 -05006333
Qu Wenruo4012daf2021-01-26 16:33:57 +08006334static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
6335 int mirror_num)
6336{
6337 struct btrfs_fs_info *fs_info = eb->fs_info;
6338 struct extent_io_tree *io_tree;
6339 struct page *page = eb->pages[0];
Qu Wenruo390ed292021-04-14 16:42:15 +08006340 struct btrfs_bio_ctrl bio_ctrl = { 0 };
Qu Wenruo4012daf2021-01-26 16:33:57 +08006341 int ret = 0;
6342
6343 ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
6344 ASSERT(PagePrivate(page));
6345 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
6346
6347 if (wait == WAIT_NONE) {
Goldwyn Rodriguesdc562192021-04-08 07:40:25 -05006348 if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
6349 return -EAGAIN;
Qu Wenruo4012daf2021-01-26 16:33:57 +08006350 } else {
6351 ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6352 if (ret < 0)
6353 return ret;
6354 }
6355
6356 ret = 0;
6357 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
6358 PageUptodate(page) ||
6359 btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
6360 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6361 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6362 return ret;
6363 }
6364
6365 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6366 eb->read_mirror = 0;
6367 atomic_set(&eb->io_pages, 1);
6368 check_buffer_tree_ref(eb);
6369 btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
6370
Qu Wenruo3d078ef2021-06-07 17:02:58 +08006371 btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
Qu Wenruo390ed292021-04-14 16:42:15 +08006372 ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
6373 page, eb->start, eb->len,
6374 eb->start - page_offset(page),
6375 end_bio_extent_readpage, mirror_num, 0,
Qu Wenruo4012daf2021-01-26 16:33:57 +08006376 true);
6377 if (ret) {
6378		/*
6379		 * In the endio function, if we hit something wrong we will
6380		 * increase io_pages, so here we need to decrease it for the
6381		 * error path.
6382		 */
6383 atomic_dec(&eb->io_pages);
6384 }
Qu Wenruo390ed292021-04-14 16:42:15 +08006385 if (bio_ctrl.bio) {
Qu Wenruo4012daf2021-01-26 16:33:57 +08006386 int tmp;
6387
Qu Wenruo390ed292021-04-14 16:42:15 +08006388 tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
6389 bio_ctrl.bio = NULL;
Qu Wenruo4012daf2021-01-26 16:33:57 +08006390 if (tmp < 0)
6391 return tmp;
6392 }
6393 if (ret || wait != WAIT_COMPLETE)
6394 return ret;
6395
6396 wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
6397 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6398 ret = -EIO;
6399 return ret;
6400}
6401
Nikolay Borisovc2ccfbc2019-04-10 17:24:40 +03006402int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05006403{
David Sterbacc5e31a2018-03-01 18:20:27 +01006404 int i;
Chris Masond1310b22008-01-24 16:13:08 -05006405 struct page *page;
6406 int err;
6407 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04006408 int locked_pages = 0;
6409 int all_uptodate = 1;
David Sterbacc5e31a2018-03-01 18:20:27 +01006410 int num_pages;
Chris Mason727011e2010-08-06 13:21:20 -04006411 unsigned long num_reads = 0;
Qu Wenruo390ed292021-04-14 16:42:15 +08006412 struct btrfs_bio_ctrl bio_ctrl = { 0 };
Chris Masona86c12c2008-02-07 10:50:54 -05006413
Chris Masonb4ce94d2009-02-04 09:25:08 -05006414 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05006415 return 0;
6416
Qu Wenruo4012daf2021-01-26 16:33:57 +08006417 if (eb->fs_info->sectorsize < PAGE_SIZE)
6418 return read_extent_buffer_subpage(eb, wait, mirror_num);
6419
David Sterba65ad0102018-06-29 10:56:49 +02006420 num_pages = num_extent_pages(eb);
Josef Bacik8436ea912016-09-02 15:40:03 -04006421 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006422 page = eb->pages[i];
Arne Jansenbb82ab82011-06-10 14:06:53 +02006423 if (wait == WAIT_NONE) {
Qu Wenruo2c4d8cb2021-01-28 19:25:08 +08006424			/*
6425			 * WAIT_NONE is only used by readahead. If we can't
6426			 * acquire the page lock atomically it means the eb is
6427			 * either being read or being modified.
6428			 * Either way the eb will be or has already been cached,
6429			 * so readahead can exit safely.
6430			 */
David Woodhouse2db04962008-08-07 11:19:43 -04006431 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04006432 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05006433 } else {
6434 lock_page(page);
6435 }
Chris Masonce9adaa2008-04-09 16:28:12 -04006436 locked_pages++;
Liu Bo2571e732016-08-03 12:33:01 -07006437 }
6438	/*
6439	 * We need to lock all pages first to make sure that
6440	 * the uptodate bit of our pages won't be affected by
6441	 * clear_extent_buffer_uptodate().
6442	 */
Josef Bacik8436ea912016-09-02 15:40:03 -04006443 for (i = 0; i < num_pages; i++) {
Liu Bo2571e732016-08-03 12:33:01 -07006444 page = eb->pages[i];
Chris Mason727011e2010-08-06 13:21:20 -04006445 if (!PageUptodate(page)) {
6446 num_reads++;
Chris Masonce9adaa2008-04-09 16:28:12 -04006447 all_uptodate = 0;
Chris Mason727011e2010-08-06 13:21:20 -04006448 }
Chris Masonce9adaa2008-04-09 16:28:12 -04006449 }
Liu Bo2571e732016-08-03 12:33:01 -07006450
Chris Masonce9adaa2008-04-09 16:28:12 -04006451 if (all_uptodate) {
Josef Bacik8436ea912016-09-02 15:40:03 -04006452 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04006453 goto unlock_exit;
6454 }
6455
Filipe Manana656f30d2014-09-26 12:25:56 +01006456 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
Josef Bacik5cf1ab52012-04-16 09:42:26 -04006457 eb->read_mirror = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04006458 atomic_set(&eb->io_pages, num_reads);
Boris Burkov6bf9cd22020-06-17 11:35:19 -07006459 /*
6460 * It is possible for releasepage to clear the TREE_REF bit before we
6461 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
6462 */
6463 check_buffer_tree_ref(eb);
Josef Bacik8436ea912016-09-02 15:40:03 -04006464 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006465 page = eb->pages[i];
Liu Bobaf863b2016-07-11 10:39:07 -07006466
Chris Masonce9adaa2008-04-09 16:28:12 -04006467 if (!PageUptodate(page)) {
Liu Bobaf863b2016-07-11 10:39:07 -07006468 if (ret) {
6469 atomic_dec(&eb->io_pages);
6470 unlock_page(page);
6471 continue;
6472 }
6473
Chris Masonf1885912008-04-09 16:28:12 -04006474 ClearPageError(page);
Nikolay Borisov04201772020-09-14 12:37:04 +03006475 err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
Qu Wenruo390ed292021-04-14 16:42:15 +08006476 &bio_ctrl, page, page_offset(page),
6477 PAGE_SIZE, 0, end_bio_extent_readpage,
6478 mirror_num, 0, false);
Liu Bobaf863b2016-07-11 10:39:07 -07006479 if (err) {
Liu Bobaf863b2016-07-11 10:39:07 -07006480				/*
Nikolay Borisov04201772020-09-14 12:37:04 +03006481				 * We failed to submit the bio, so it's the
6482				 * caller's responsibility to perform cleanup,
6483				 * i.e. unlock the page and set the error bit.
6484				 */
Nikolay Borisov04201772020-09-14 12:37:04 +03006485 ret = err;
6486 SetPageError(page);
6487 unlock_page(page);
Liu Bobaf863b2016-07-11 10:39:07 -07006488 atomic_dec(&eb->io_pages);
6489 }
Chris Masond1310b22008-01-24 16:13:08 -05006490 } else {
6491 unlock_page(page);
6492 }
6493 }
6494
Qu Wenruo390ed292021-04-14 16:42:15 +08006495 if (bio_ctrl.bio) {
6496 err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
6497 bio_ctrl.bio = NULL;
Jeff Mahoney79787ea2012-03-12 16:03:00 +01006498 if (err)
6499 return err;
Jeff Mahoney355808c2011-10-03 23:23:14 -04006500 }
Chris Masona86c12c2008-02-07 10:50:54 -05006501
Arne Jansenbb82ab82011-06-10 14:06:53 +02006502 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05006503 return ret;
Chris Masond3977122009-01-05 21:25:51 -05006504
Josef Bacik8436ea912016-09-02 15:40:03 -04006505 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02006506 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05006507 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05006508 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05006509 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05006510 }
Chris Masond3977122009-01-05 21:25:51 -05006511
Chris Masond1310b22008-01-24 16:13:08 -05006512 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04006513
6514unlock_exit:
Chris Masond3977122009-01-05 21:25:51 -05006515 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04006516 locked_pages--;
Josef Bacik8436ea912016-09-02 15:40:03 -04006517 page = eb->pages[locked_pages];
6518 unlock_page(page);
Chris Masonce9adaa2008-04-09 16:28:12 -04006519 }
6520 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05006521}
Chris Masond1310b22008-01-24 16:13:08 -05006522
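/*
 * Illustrative sketch (not part of the original file): the two common ways
 * callers drive read_extent_buffer_pages(). The helper name is hypothetical;
 * WAIT_NONE/WAIT_COMPLETE and the function itself are the interfaces used
 * above.
 */
static int example_read_eb(struct extent_buffer *eb, bool readahead)
{
	if (readahead)
		/* Kick off the read without blocking; contents may not be ready yet. */
		return read_extent_buffer_pages(eb, WAIT_NONE, 0);

	/* Block until the read finishes; 0 means the eb is uptodate. */
	return read_extent_buffer_pages(eb, WAIT_COMPLETE, 0);
}
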
Qu Wenruof98b6212020-08-19 14:35:47 +08006523static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
6524 unsigned long len)
6525{
6526 btrfs_warn(eb->fs_info,
6527 "access to eb bytenr %llu len %lu out of range start %lu len %lu",
6528 eb->start, eb->len, start, len);
6529 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
6530
6531 return true;
6532}
6533
6534/*
6535 * Check if the [start, start + len) range is valid before reading/writing
6536 * the eb.
6537 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
6538 *
6539 * The caller should not touch the dst/src memory if this function fails.
6540 */
6541static inline int check_eb_range(const struct extent_buffer *eb,
6542 unsigned long start, unsigned long len)
6543{
6544 unsigned long offset;
6545
6546 /* start, start + len should not go beyond eb->len nor overflow */
6547 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
6548 return report_eb_range(eb, start, len);
6549
6550 return false;
6551}
6552
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06006553void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
6554 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006555{
6556 size_t cur;
6557 size_t offset;
6558 struct page *page;
6559 char *kaddr;
6560 char *dst = (char *)dstv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006561 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006562
Qu Wenruof98b6212020-08-19 14:35:47 +08006563 if (check_eb_range(eb, start, len))
Liu Bof716abd2017-08-09 11:10:16 -06006564 return;
Chris Masond1310b22008-01-24 16:13:08 -05006565
Qu Wenruo884b07d2020-12-02 14:48:04 +08006566 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006567
Chris Masond3977122009-01-05 21:25:51 -05006568 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006569 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05006570
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006571 cur = min(len, (PAGE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04006572 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006573 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006574
6575 dst += cur;
6576 len -= cur;
6577 offset = 0;
6578 i++;
6579 }
6580}
Chris Masond1310b22008-01-24 16:13:08 -05006581
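/*
 * Illustrative sketch (not part of the original file): pulling a single
 * little-endian u64 field out of an extent buffer with read_extent_buffer(),
 * roughly the pattern the btrfs metadata accessors follow. The helper name
 * and @field_offset are hypothetical.
 */
static u64 example_read_u64(const struct extent_buffer *eb,
			    unsigned long field_offset)
{
	__le64 raw;

	/* Copies across a page boundary if the field straddles one. */
	read_extent_buffer(eb, &raw, field_offset, sizeof(raw));
	return le64_to_cpu(raw);
}
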
Josef Bacika48b73e2020-08-10 11:42:27 -04006582int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
6583 void __user *dstv,
6584 unsigned long start, unsigned long len)
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006585{
6586 size_t cur;
6587 size_t offset;
6588 struct page *page;
6589 char *kaddr;
6590 char __user *dst = (char __user *)dstv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006591 unsigned long i = get_eb_page_index(start);
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006592 int ret = 0;
6593
6594 WARN_ON(start > eb->len);
6595 WARN_ON(start + len > eb->start + eb->len);
6596
Qu Wenruo884b07d2020-12-02 14:48:04 +08006597 offset = get_eb_offset_in_page(eb, start);
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006598
6599 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006600 page = eb->pages[i];
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006601
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006602 cur = min(len, (PAGE_SIZE - offset));
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006603 kaddr = page_address(page);
Josef Bacika48b73e2020-08-10 11:42:27 -04006604 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
Gerhard Heift550ac1d2014-01-30 16:24:01 +01006605 ret = -EFAULT;
6606 break;
6607 }
6608
6609 dst += cur;
6610 len -= cur;
6611 offset = 0;
6612 i++;
6613 }
6614
6615 return ret;
6616}
6617
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -06006618int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
6619 unsigned long start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006620{
6621 size_t cur;
6622 size_t offset;
6623 struct page *page;
6624 char *kaddr;
6625 char *ptr = (char *)ptrv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006626 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006627 int ret = 0;
6628
Qu Wenruof98b6212020-08-19 14:35:47 +08006629 if (check_eb_range(eb, start, len))
6630 return -EINVAL;
Chris Masond1310b22008-01-24 16:13:08 -05006631
Qu Wenruo884b07d2020-12-02 14:48:04 +08006632 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006633
Chris Masond3977122009-01-05 21:25:51 -05006634 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006635 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05006636
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006637 cur = min(len, (PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05006638
Chris Masona6591712011-07-19 12:04:14 -04006639 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006640 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006641 if (ret)
6642 break;
6643
6644 ptr += cur;
6645 len -= cur;
6646 offset = 0;
6647 i++;
6648 }
6649 return ret;
6650}
Chris Masond1310b22008-01-24 16:13:08 -05006651
Qu Wenruob8f95772021-03-25 15:14:42 +08006652/*
6653 * Check that the extent buffer is uptodate.
6654 *
6655 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
6656 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
6657 */
6658static void assert_eb_page_uptodate(const struct extent_buffer *eb,
6659 struct page *page)
6660{
6661 struct btrfs_fs_info *fs_info = eb->fs_info;
6662
6663 if (fs_info->sectorsize < PAGE_SIZE) {
6664 bool uptodate;
6665
6666 uptodate = btrfs_subpage_test_uptodate(fs_info, page,
6667 eb->start, eb->len);
6668 WARN_ON(!uptodate);
6669 } else {
6670 WARN_ON(!PageUptodate(page));
6671 }
6672}
6673
David Sterba2b489662020-04-29 03:04:10 +02006674void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
David Sterbaf157bf72016-11-09 17:43:38 +01006675 const void *srcv)
6676{
6677 char *kaddr;
6678
Qu Wenruob8f95772021-03-25 15:14:42 +08006679 assert_eb_page_uptodate(eb, eb->pages[0]);
David Sterba24880be52020-09-21 22:07:14 +02006680 kaddr = page_address(eb->pages[0]) +
6681 get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
6682 chunk_tree_uuid));
6683 memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
David Sterbaf157bf72016-11-09 17:43:38 +01006684}
6685
David Sterba2b489662020-04-29 03:04:10 +02006686void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
David Sterbaf157bf72016-11-09 17:43:38 +01006687{
6688 char *kaddr;
6689
Qu Wenruob8f95772021-03-25 15:14:42 +08006690 assert_eb_page_uptodate(eb, eb->pages[0]);
David Sterba24880be52020-09-21 22:07:14 +02006691 kaddr = page_address(eb->pages[0]) +
6692 get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
6693 memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
David Sterbaf157bf72016-11-09 17:43:38 +01006694}
6695
David Sterba2b489662020-04-29 03:04:10 +02006696void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
Chris Masond1310b22008-01-24 16:13:08 -05006697 unsigned long start, unsigned long len)
6698{
6699 size_t cur;
6700 size_t offset;
6701 struct page *page;
6702 char *kaddr;
6703 char *src = (char *)srcv;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006704 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006705
Naohiro Aotad35751562021-02-04 19:21:54 +09006706 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
6707
Qu Wenruof98b6212020-08-19 14:35:47 +08006708 if (check_eb_range(eb, start, len))
6709 return;
Chris Masond1310b22008-01-24 16:13:08 -05006710
Qu Wenruo884b07d2020-12-02 14:48:04 +08006711 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006712
Chris Masond3977122009-01-05 21:25:51 -05006713 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006714 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006715 assert_eb_page_uptodate(eb, page);
Chris Masond1310b22008-01-24 16:13:08 -05006716
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006717 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04006718 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006719 memcpy(kaddr + offset, src, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006720
6721 src += cur;
6722 len -= cur;
6723 offset = 0;
6724 i++;
6725 }
6726}
Chris Masond1310b22008-01-24 16:13:08 -05006727
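/*
 * Illustrative sketch (not part of the original file): the mirror image of the
 * read example earlier, storing a u64 in on-disk (little-endian) byte order
 * with write_extent_buffer(). The helper name and @field_offset are
 * hypothetical.
 */
static void example_write_u64(const struct extent_buffer *eb,
			      unsigned long field_offset, u64 val)
{
	__le64 raw = cpu_to_le64(val);

	write_extent_buffer(eb, &raw, field_offset, sizeof(raw));
}
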
David Sterba2b489662020-04-29 03:04:10 +02006728void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
David Sterbab159fa22016-11-08 18:09:03 +01006729 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006730{
6731 size_t cur;
6732 size_t offset;
6733 struct page *page;
6734 char *kaddr;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006735 unsigned long i = get_eb_page_index(start);
Chris Masond1310b22008-01-24 16:13:08 -05006736
Qu Wenruof98b6212020-08-19 14:35:47 +08006737 if (check_eb_range(eb, start, len))
6738 return;
Chris Masond1310b22008-01-24 16:13:08 -05006739
Qu Wenruo884b07d2020-12-02 14:48:04 +08006740 offset = get_eb_offset_in_page(eb, start);
Chris Masond1310b22008-01-24 16:13:08 -05006741
Chris Masond3977122009-01-05 21:25:51 -05006742 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006743 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006744 assert_eb_page_uptodate(eb, page);
Chris Masond1310b22008-01-24 16:13:08 -05006745
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006746 cur = min(len, PAGE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04006747 kaddr = page_address(page);
David Sterbab159fa22016-11-08 18:09:03 +01006748 memset(kaddr + offset, 0, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006749
6750 len -= cur;
6751 offset = 0;
6752 i++;
6753 }
6754}
Chris Masond1310b22008-01-24 16:13:08 -05006755
David Sterba2b489662020-04-29 03:04:10 +02006756void copy_extent_buffer_full(const struct extent_buffer *dst,
6757 const struct extent_buffer *src)
David Sterba58e80122016-11-08 18:30:31 +01006758{
6759 int i;
David Sterbacc5e31a2018-03-01 18:20:27 +01006760 int num_pages;
David Sterba58e80122016-11-08 18:30:31 +01006761
6762 ASSERT(dst->len == src->len);
6763
Qu Wenruo884b07d2020-12-02 14:48:04 +08006764 if (dst->fs_info->sectorsize == PAGE_SIZE) {
6765 num_pages = num_extent_pages(dst);
6766 for (i = 0; i < num_pages; i++)
6767 copy_page(page_address(dst->pages[i]),
6768 page_address(src->pages[i]));
6769 } else {
6770 size_t src_offset = get_eb_offset_in_page(src, 0);
6771 size_t dst_offset = get_eb_offset_in_page(dst, 0);
6772
6773 ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
6774 memcpy(page_address(dst->pages[0]) + dst_offset,
6775 page_address(src->pages[0]) + src_offset,
6776 src->len);
6777 }
David Sterba58e80122016-11-08 18:30:31 +01006778}
6779
David Sterba2b489662020-04-29 03:04:10 +02006780void copy_extent_buffer(const struct extent_buffer *dst,
6781 const struct extent_buffer *src,
Chris Masond1310b22008-01-24 16:13:08 -05006782 unsigned long dst_offset, unsigned long src_offset,
6783 unsigned long len)
6784{
6785 u64 dst_len = dst->len;
6786 size_t cur;
6787 size_t offset;
6788 struct page *page;
6789 char *kaddr;
Qu Wenruo884b07d2020-12-02 14:48:04 +08006790 unsigned long i = get_eb_page_index(dst_offset);
Chris Masond1310b22008-01-24 16:13:08 -05006791
Qu Wenruof98b6212020-08-19 14:35:47 +08006792 if (check_eb_range(dst, dst_offset, len) ||
6793 check_eb_range(src, src_offset, len))
6794 return;
6795
Chris Masond1310b22008-01-24 16:13:08 -05006796 WARN_ON(src->len != dst_len);
6797
Qu Wenruo884b07d2020-12-02 14:48:04 +08006798 offset = get_eb_offset_in_page(dst, dst_offset);
Chris Masond1310b22008-01-24 16:13:08 -05006799
Chris Masond3977122009-01-05 21:25:51 -05006800 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02006801 page = dst->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006802 assert_eb_page_uptodate(dst, page);
Chris Masond1310b22008-01-24 16:13:08 -05006803
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006804 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
Chris Masond1310b22008-01-24 16:13:08 -05006805
Chris Masona6591712011-07-19 12:04:14 -04006806 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05006807 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05006808
6809 src_offset += cur;
6810 len -= cur;
6811 offset = 0;
6812 i++;
6813 }
6814}
Chris Masond1310b22008-01-24 16:13:08 -05006815
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006816/*
6817 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
6818 * given bit number
6819 * @eb: the extent buffer
6820 * @start: offset of the bitmap item in the extent buffer
6821 * @nr: bit number
6822 * @page_index: return index of the page in the extent buffer that contains the
6823 * given bit number
6824 * @page_offset: return offset into the page given by page_index
6825 *
6826 * This helper hides the ugliness of finding the byte in an extent buffer which
6827 * contains a given bit.
6828 */
David Sterba2b489662020-04-29 03:04:10 +02006829static inline void eb_bitmap_offset(const struct extent_buffer *eb,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006830 unsigned long start, unsigned long nr,
6831 unsigned long *page_index,
6832 size_t *page_offset)
6833{
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006834 size_t byte_offset = BIT_BYTE(nr);
6835 size_t offset;
6836
6837 /*
6838 * The byte we want is the offset of the extent buffer + the offset of
6839 * the bitmap item in the extent buffer + the offset of the byte in the
6840 * bitmap item.
6841 */
Qu Wenruo884b07d2020-12-02 14:48:04 +08006842 offset = start + offset_in_page(eb->start) + byte_offset;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006843
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006844 *page_index = offset >> PAGE_SHIFT;
Johannes Thumshirn70730172018-12-05 15:23:03 +01006845 *page_offset = offset_in_page(offset);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006846}
6847
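/*
 * Worked example for eb_bitmap_offset() (illustrative, values assumed): with
 * 4K pages and offset_in_page(eb->start) == 0, a bitmap item at @start == 4000
 * and bit @nr == 1000 gives byte_offset = BIT_BYTE(1000) = 125, so
 * offset = 4000 + 0 + 125 = 4125, hence *page_index = 4125 >> PAGE_SHIFT = 1
 * and *page_offset = offset_in_page(4125) = 29.
 */
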
6848/**
6849 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
6850 * @eb: the extent buffer
6851 * @start: offset of the bitmap item in the extent buffer
6852 * @nr: bit number to test
6853 */
David Sterba2b489662020-04-29 03:04:10 +02006854int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006855 unsigned long nr)
6856{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006857 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006858 struct page *page;
6859 unsigned long i;
6860 size_t offset;
6861
6862 eb_bitmap_offset(eb, start, nr, &i, &offset);
6863 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006864 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006865 kaddr = page_address(page);
6866 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
6867}
6868
6869/**
6870 * extent_buffer_bitmap_set - set an area of a bitmap
6871 * @eb: the extent buffer
6872 * @start: offset of the bitmap item in the extent buffer
6873 * @pos: bit number of the first bit
6874 * @len: number of bits to set
6875 */
David Sterba2b489662020-04-29 03:04:10 +02006876void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006877 unsigned long pos, unsigned long len)
6878{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006879 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006880 struct page *page;
6881 unsigned long i;
6882 size_t offset;
6883 const unsigned int size = pos + len;
6884 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006885 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006886
6887 eb_bitmap_offset(eb, start, pos, &i, &offset);
6888 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006889 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006890 kaddr = page_address(page);
6891
6892 while (len >= bits_to_set) {
6893 kaddr[offset] |= mask_to_set;
6894 len -= bits_to_set;
6895 bits_to_set = BITS_PER_BYTE;
Dan Carpenter9c894692016-10-12 11:33:21 +03006896 mask_to_set = ~0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006897 if (++offset >= PAGE_SIZE && len > 0) {
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006898 offset = 0;
6899 page = eb->pages[++i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006900 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006901 kaddr = page_address(page);
6902 }
6903 }
6904 if (len) {
6905 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
6906 kaddr[offset] |= mask_to_set;
6907 }
6908}
6909
6910
6911/**
6912 * extent_buffer_bitmap_clear - clear an area of a bitmap
6913 * @eb: the extent buffer
6914 * @start: offset of the bitmap item in the extent buffer
6915 * @pos: bit number of the first bit
6916 * @len: number of bits to clear
6917 */
David Sterba2b489662020-04-29 03:04:10 +02006918void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
6919 unsigned long start, unsigned long pos,
6920 unsigned long len)
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006921{
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006922 u8 *kaddr;
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006923 struct page *page;
6924 unsigned long i;
6925 size_t offset;
6926 const unsigned int size = pos + len;
6927 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
Omar Sandoval2fe1d552016-09-22 17:24:20 -07006928 u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006929
6930 eb_bitmap_offset(eb, start, pos, &i, &offset);
6931 page = eb->pages[i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006932 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006933 kaddr = page_address(page);
6934
6935 while (len >= bits_to_clear) {
6936 kaddr[offset] &= ~mask_to_clear;
6937 len -= bits_to_clear;
6938 bits_to_clear = BITS_PER_BYTE;
Dan Carpenter9c894692016-10-12 11:33:21 +03006939 mask_to_clear = ~0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006940 if (++offset >= PAGE_SIZE && len > 0) {
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006941 offset = 0;
6942 page = eb->pages[++i];
Qu Wenruob8f95772021-03-25 15:14:42 +08006943 assert_eb_page_uptodate(eb, page);
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07006944 kaddr = page_address(page);
6945 }
6946 }
6947 if (len) {
6948 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
6949 kaddr[offset] &= ~mask_to_clear;
6950 }
6951}
6952
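/*
 * Illustrative sketch (not part of the original file): a round trip over a
 * bitmap item stored in an extent buffer using the helpers above. The function
 * name and parameters are hypothetical.
 */
static void example_bitmap_roundtrip(const struct extent_buffer *eb,
				     unsigned long bitmap_start,
				     unsigned long first_bit,
				     unsigned long nbits)
{
	/* Set a run of bits ... */
	extent_buffer_bitmap_set(eb, bitmap_start, first_bit, nbits);
	/* ... the first bit of the run now reads back as 1 ... */
	WARN_ON(!extent_buffer_test_bit(eb, bitmap_start, first_bit));
	/* ... and clearing the run returns it to 0. */
	extent_buffer_bitmap_clear(eb, bitmap_start, first_bit, nbits);
	WARN_ON(extent_buffer_test_bit(eb, bitmap_start, first_bit));
}
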
Sergei Trofimovich33872062011-04-11 21:52:52 +00006953static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
6954{
6955 unsigned long distance = (src > dst) ? src - dst : dst - src;
6956 return distance < len;
6957}
6958
Chris Masond1310b22008-01-24 16:13:08 -05006959static void copy_pages(struct page *dst_page, struct page *src_page,
6960 unsigned long dst_off, unsigned long src_off,
6961 unsigned long len)
6962{
Chris Masona6591712011-07-19 12:04:14 -04006963 char *dst_kaddr = page_address(dst_page);
Chris Masond1310b22008-01-24 16:13:08 -05006964 char *src_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04006965 int must_memmove = 0;
Chris Masond1310b22008-01-24 16:13:08 -05006966
Sergei Trofimovich33872062011-04-11 21:52:52 +00006967 if (dst_page != src_page) {
Chris Masona6591712011-07-19 12:04:14 -04006968 src_kaddr = page_address(src_page);
Sergei Trofimovich33872062011-04-11 21:52:52 +00006969 } else {
Chris Masond1310b22008-01-24 16:13:08 -05006970 src_kaddr = dst_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04006971 if (areas_overlap(src_off, dst_off, len))
6972 must_memmove = 1;
Sergei Trofimovich33872062011-04-11 21:52:52 +00006973 }
Chris Masond1310b22008-01-24 16:13:08 -05006974
Chris Mason727011e2010-08-06 13:21:20 -04006975 if (must_memmove)
6976 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
6977 else
6978 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
Chris Masond1310b22008-01-24 16:13:08 -05006979}
6980
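/*
 * Worked example for the overlap check above (illustrative, values assumed):
 * within one page, src_off = 100, dst_off = 140 and len = 64 give
 * distance = 40 < 64, so areas_overlap() returns true and copy_pages() must
 * use memmove(); with len = 32 the ranges are disjoint and memcpy() is safe.
 */
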
David Sterba2b489662020-04-29 03:04:10 +02006981void memcpy_extent_buffer(const struct extent_buffer *dst,
6982 unsigned long dst_offset, unsigned long src_offset,
6983 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05006984{
6985 size_t cur;
6986 size_t dst_off_in_page;
6987 size_t src_off_in_page;
Chris Masond1310b22008-01-24 16:13:08 -05006988 unsigned long dst_i;
6989 unsigned long src_i;
6990
Qu Wenruof98b6212020-08-19 14:35:47 +08006991 if (check_eb_range(dst, dst_offset, len) ||
6992 check_eb_range(dst, src_offset, len))
6993 return;
Chris Masond1310b22008-01-24 16:13:08 -05006994
Chris Masond3977122009-01-05 21:25:51 -05006995 while (len > 0) {
Qu Wenruo884b07d2020-12-02 14:48:04 +08006996 dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
6997 src_off_in_page = get_eb_offset_in_page(dst, src_offset);
Chris Masond1310b22008-01-24 16:13:08 -05006998
Qu Wenruo884b07d2020-12-02 14:48:04 +08006999 dst_i = get_eb_page_index(dst_offset);
7000 src_i = get_eb_page_index(src_offset);
Chris Masond1310b22008-01-24 16:13:08 -05007001
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03007002 cur = min(len, (unsigned long)(PAGE_SIZE -
Chris Masond1310b22008-01-24 16:13:08 -05007003 src_off_in_page));
7004 cur = min_t(unsigned long, cur,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03007005 (unsigned long)(PAGE_SIZE - dst_off_in_page));
Chris Masond1310b22008-01-24 16:13:08 -05007006
David Sterbafb85fc92014-07-31 01:03:53 +02007007 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05007008 dst_off_in_page, src_off_in_page, cur);
7009
7010 src_offset += cur;
7011 dst_offset += cur;
7012 len -= cur;
7013 }
7014}
Chris Masond1310b22008-01-24 16:13:08 -05007015
David Sterba2b489662020-04-29 03:04:10 +02007016void memmove_extent_buffer(const struct extent_buffer *dst,
7017 unsigned long dst_offset, unsigned long src_offset,
7018 unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05007019{
7020 size_t cur;
7021 size_t dst_off_in_page;
7022 size_t src_off_in_page;
7023 unsigned long dst_end = dst_offset + len - 1;
7024 unsigned long src_end = src_offset + len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05007025 unsigned long dst_i;
7026 unsigned long src_i;
7027
Qu Wenruof98b6212020-08-19 14:35:47 +08007028 if (check_eb_range(dst, dst_offset, len) ||
7029 check_eb_range(dst, src_offset, len))
7030 return;
Chris Mason727011e2010-08-06 13:21:20 -04007031 if (dst_offset < src_offset) {
Chris Masond1310b22008-01-24 16:13:08 -05007032 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
7033 return;
7034 }
Chris Masond3977122009-01-05 21:25:51 -05007035 while (len > 0) {
Qu Wenruo884b07d2020-12-02 14:48:04 +08007036 dst_i = get_eb_page_index(dst_end);
7037 src_i = get_eb_page_index(src_end);
Chris Masond1310b22008-01-24 16:13:08 -05007038
Qu Wenruo884b07d2020-12-02 14:48:04 +08007039 dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
7040 src_off_in_page = get_eb_offset_in_page(dst, src_end);
Chris Masond1310b22008-01-24 16:13:08 -05007041
7042 cur = min_t(unsigned long, len, src_off_in_page + 1);
7043 cur = min(cur, dst_off_in_page + 1);
David Sterbafb85fc92014-07-31 01:03:53 +02007044 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05007045 dst_off_in_page - cur + 1,
7046 src_off_in_page - cur + 1, cur);
7047
7048 dst_end -= cur;
7049 src_end -= cur;
7050 len -= cur;
7051 }
7052}
Chris Mason6af118ce2008-07-22 11:18:07 -04007053
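/*
 * Illustrative sketch (not part of the original file): opening a gap inside an
 * extent buffer with the overlap-safe helper above, similar to what item
 * shifting in a leaf has to do. The helper name and offsets are hypothetical.
 */
static void example_make_room(const struct extent_buffer *leaf,
			      unsigned long data_start,
			      unsigned long data_len,
			      unsigned long gap_len)
{
	/*
	 * Source and destination ranges may overlap, which is exactly what
	 * memmove_extent_buffer() handles by copying backwards when needed.
	 */
	memmove_extent_buffer(leaf, data_start + gap_len, data_start, data_len);
	/* Zero the gap we just opened. */
	memzero_extent_buffer(leaf, data_start, gap_len);
}
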
Qu Wenruod1e86e32021-01-26 16:33:56 +08007054static struct extent_buffer *get_next_extent_buffer(
7055 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
7056{
7057 struct extent_buffer *gang[BTRFS_SUBPAGE_BITMAP_SIZE];
7058 struct extent_buffer *found = NULL;
7059 u64 page_start = page_offset(page);
7060 int ret;
7061 int i;
7062
7063 ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
7064 ASSERT(PAGE_SIZE / fs_info->nodesize <= BTRFS_SUBPAGE_BITMAP_SIZE);
7065 lockdep_assert_held(&fs_info->buffer_lock);
7066
7067 ret = radix_tree_gang_lookup(&fs_info->buffer_radix, (void **)gang,
7068 bytenr >> fs_info->sectorsize_bits,
7069 PAGE_SIZE / fs_info->nodesize);
7070 for (i = 0; i < ret; i++) {
7071 /* Already beyond page end */
7072 if (gang[i]->start >= page_start + PAGE_SIZE)
7073 break;
7074 /* Found one */
7075 if (gang[i]->start >= bytenr) {
7076 found = gang[i];
7077 break;
7078 }
7079 }
7080 return found;
7081}
7082
7083static int try_release_subpage_extent_buffer(struct page *page)
7084{
7085 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
7086 u64 cur = page_offset(page);
7087 const u64 end = page_offset(page) + PAGE_SIZE;
7088 int ret;
7089
7090 while (cur < end) {
7091 struct extent_buffer *eb = NULL;
7092
7093		/*
7094		 * Unlike try_release_extent_buffer(), which uses page->private
7095		 * to grab the buffer, for the subpage case we rely on the radix
7096		 * tree, thus we need to ensure radix tree consistency.
7097		 *
7098		 * We also want an atomic snapshot of the radix tree, so go
7099		 * with the spinlock rather than RCU.
7100		 */
7101 spin_lock(&fs_info->buffer_lock);
7102 eb = get_next_extent_buffer(fs_info, page, cur);
7103 if (!eb) {
7104 /* No more eb in the page range after or at cur */
7105 spin_unlock(&fs_info->buffer_lock);
7106 break;
7107 }
7108 cur = eb->start + eb->len;
7109
7110 /*
7111 * The same as try_release_extent_buffer(), to ensure the eb
7112 * won't disappear out from under us.
7113 */
7114 spin_lock(&eb->refs_lock);
7115 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
7116 spin_unlock(&eb->refs_lock);
7117 spin_unlock(&fs_info->buffer_lock);
7118 break;
7119 }
7120 spin_unlock(&fs_info->buffer_lock);
7121
7122		/*
7123		 * If the tree ref isn't set then we know the ref on this eb is a
7124		 * real ref, so just return; this eb will likely be freed soon
7125		 * anyway.
7126		 */
7127 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7128 spin_unlock(&eb->refs_lock);
7129 break;
7130 }
7131
7132		/*
7133		 * Here we don't care about the return value; we will always
7134		 * check the page private at the end, and
7135		 * release_extent_buffer() will release the refs_lock.
7136		 */
7137 release_extent_buffer(eb);
7138 }
7139	/*
7140	 * Finally, check whether page private has been cleared: if we have
7141	 * released all ebs in the page, the page private should be cleared now.
7142	 */
7143 spin_lock(&page->mapping->private_lock);
7144 if (!PagePrivate(page))
7145 ret = 1;
7146 else
7147 ret = 0;
7148 spin_unlock(&page->mapping->private_lock);
7149 return ret;
7150
7151}
7152
David Sterbaf7a52a42013-04-26 14:56:29 +00007153int try_release_extent_buffer(struct page *page)
Miao Xie19fe0a82010-10-26 20:57:29 -04007154{
Chris Mason6af118ce2008-07-22 11:18:07 -04007155 struct extent_buffer *eb;
Miao Xie897ca6e92010-10-26 20:57:29 -04007156
Qu Wenruod1e86e32021-01-26 16:33:56 +08007157 if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
7158 return try_release_subpage_extent_buffer(page);
7159
Miao Xie19fe0a82010-10-26 20:57:29 -04007160	/*
Qu Wenruod1e86e32021-01-26 16:33:56 +08007161	 * We need to make sure nobody is changing page->private, as we rely on
7162	 * page->private as the pointer to the extent buffer.
Miao Xie19fe0a82010-10-26 20:57:29 -04007163	 */
Josef Bacik3083ee22012-03-09 16:01:49 -05007164 spin_lock(&page->mapping->private_lock);
7165 if (!PagePrivate(page)) {
7166 spin_unlock(&page->mapping->private_lock);
7167 return 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04007168 }
7169
Josef Bacik3083ee22012-03-09 16:01:49 -05007170 eb = (struct extent_buffer *)page->private;
7171 BUG_ON(!eb);
Miao Xie19fe0a82010-10-26 20:57:29 -04007172
Josef Bacik0b32f4b2012-03-13 09:38:00 -04007173	/*
Josef Bacik3083ee22012-03-09 16:01:49 -05007174	 * This is a little awful but should be OK; we need to make sure that
7175	 * the eb doesn't disappear out from under us while we're looking at
7176	 * this page.
7177	 */
7178 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04007179 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
Josef Bacik3083ee22012-03-09 16:01:49 -05007180 spin_unlock(&eb->refs_lock);
7181 spin_unlock(&page->mapping->private_lock);
7182 return 0;
7183 }
7184 spin_unlock(&page->mapping->private_lock);
7185
Josef Bacik3083ee22012-03-09 16:01:49 -05007186	/*
7187	 * If the tree ref isn't set then we know the ref on this eb is a real
7188	 * ref, so just return; this page will likely be freed soon anyway.
7189	 */
7190 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7191 spin_unlock(&eb->refs_lock);
7192 return 0;
7193 }
Josef Bacik3083ee22012-03-09 16:01:49 -05007194
David Sterbaf7a52a42013-04-26 14:56:29 +00007195 return release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04007196}
Josef Bacikbfb484d2020-11-05 10:45:09 -05007197
7198/*
7199 * btrfs_readahead_tree_block - attempt to readahead a child block
7200 * @fs_info: the fs_info
7201 * @bytenr: bytenr to read
Josef Bacik3fbaf252020-11-05 10:45:20 -05007202 * @owner_root: objectid of the root that owns this eb
Josef Bacikbfb484d2020-11-05 10:45:09 -05007203 * @gen: generation for the uptodate check, can be 0
Josef Bacik3fbaf252020-11-05 10:45:20 -05007204 * @level: level for the eb
Josef Bacikbfb484d2020-11-05 10:45:09 -05007205 *
7206 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
7207 * normal uptodate check of the eb, without checking the generation. If we have
7208 * to read the block we will not block on anything.
7209 */
7210void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
Josef Bacik3fbaf252020-11-05 10:45:20 -05007211 u64 bytenr, u64 owner_root, u64 gen, int level)
Josef Bacikbfb484d2020-11-05 10:45:09 -05007212{
7213 struct extent_buffer *eb;
7214 int ret;
7215
Josef Bacik3fbaf252020-11-05 10:45:20 -05007216 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
Josef Bacikbfb484d2020-11-05 10:45:09 -05007217 if (IS_ERR(eb))
7218 return;
7219
7220 if (btrfs_buffer_uptodate(eb, gen, 1)) {
7221 free_extent_buffer(eb);
7222 return;
7223 }
7224
7225 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
7226 if (ret < 0)
7227 free_extent_buffer_stale(eb);
7228 else
7229 free_extent_buffer(eb);
7230}
7231
7232/*
7233 * btrfs_readahead_node_child - readahead a node's child block
7234 * @node: parent node we're reading from
7235 * @slot: slot in the parent node for the child we want to read
7236 *
7237 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr
7238 * pointed at by the given slot in the node provided.
7239 */
7240void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
7241{
7242 btrfs_readahead_tree_block(node->fs_info,
7243 btrfs_node_blockptr(node, slot),
Josef Bacik3fbaf252020-11-05 10:45:20 -05007244 btrfs_header_owner(node),
7245 btrfs_node_ptr_generation(node, slot),
7246 btrfs_header_level(node) - 1);
Josef Bacikbfb484d2020-11-05 10:45:09 -05007247}
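
/*
 * Illustrative sketch (not part of the original file): prefetching every child
 * of a node before descending into it, using the helper above. The function
 * name is hypothetical; btrfs_header_nritems() is the regular header accessor.
 */
static void example_readahead_all_children(struct extent_buffer *node)
{
	int nritems = btrfs_header_nritems(node);
	int slot;

	for (slot = 0; slot < nritems; slot++)
		btrfs_readahead_node_child(node, slot);
}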