// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * When auto defrag is enabled, we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* inode objectid */
	u64 ino;
	/*
	 * The transid when the defrag was added; we search for extents newer
	 * than this.
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

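/*
 * The defrag tree is totally ordered by (root, ino), root compared first;
 * e.g. a record for (root 5, ino 300) sorts before (root 7, ino 1), which
 * in turn sorts before (root 7, ino 2).
 */
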
/*
 * Insert a record for an inode into the defrag tree.  The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/*
			 * If we're reinserting an entry for an old defrag run,
			 * make sure to lower the transid of our existing
			 * record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}

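/*
 * Note on the -EEXIST contract above: the existing tree entry was updated in
 * place and ownership of the passed defrag struct stays with the caller,
 * which must free it (see btrfs_add_inode_defrag() below for the pattern).
 */
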
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and then evict the inode from
		 * memory, a later re-read creates a new in-memory inode
		 * without the IN_DEFRAG flag.  In that case we may find an
		 * existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

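/*
 * A minimal caller sketch (hypothetical, not from this file): queue an inode
 * for a later auto defrag pass after a small fragmented write.  The return
 * value is commonly ignored; failure only means the inode is not queued.
 *
 *	if (inode_should_defrag)	// hypothetical predicate
 *		btrfs_add_inode_defrag(NULL, BTRFS_I(inode));
 */
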
/*
 * Requeue the defrag object.  If there is a defrag object that points to the
 * same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * the records together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024

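/*
 * Each pass below defragments at most BTRFS_DEFRAG_BATCH pages of a file
 * before the inode is requeued.  Assuming 4 KiB pages (an assumption,
 * PAGE_SIZE varies by architecture), that is up to 4 MiB of data per pass.
 */
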
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int ret;

	/* Get the inode. */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	/* Do a chunk of defrag. */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * If we filled the whole defrag batch, there must be more work to
	 * do.  Queue this defrag again.
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * We didn't fill our defrag batch, but we didn't start at
		 * zero.  Make sure we loop around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

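/*
 * The requeue-or-free logic above gives each inode a fair share of the
 * defrag thread: a full batch means more work remains, a partial batch that
 * did not start at offset zero earns exactly one wrap-around pass (cycled),
 * and anything else retires the record.
 */
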
/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* Find an inode to defrag. */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * During unmount, we use the transaction_wait queue to wait for the
	 * defragger to stop.
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

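/*
 * The (root_objectid, first_ino) pair above acts as a cursor into the defrag
 * tree: each pick returns the next record at or after the cursor, and a miss
 * with a non-zero cursor resets it to (0, 0) for one more sweep before the
 * loop gives up.
 */
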
/*
 * Simple helper to fault in pages and copy.  This should go away and be
 * replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page.
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page. */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * up-to-date pages.  These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page. */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

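/*
 * Worked example (assuming 4 KiB pages): a 6000 byte write at pos 5000
 * starts at offset_in_page(5000) = 904, so the first iteration copies
 * 4096 - 904 = 3192 bytes into prepared_pages[0] and the second copies the
 * remaining 2808 bytes into prepared_pages[1] starting at offset 0.
 */
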
/*
 * Unlock pages after btrfs_file_write() is done with them.
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/*
		 * PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here.  There should be no need to mark the pages
		 * accessed, as prepare_pages should have marked them accessed
		 * via find_or_create_page().
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	if (write_bytes == 0)
		return 0;

	if (noreserve)
		extra_bits |= EXTENT_NORESERVE;

	start_pos = round_down(pos, fs_info->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly.
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, cached);

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * We've only changed i_size in ram, and we haven't updated the disk
	 * i_size.  There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}

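/*
 * Worked example for the rounding above (assuming a 4096 byte sectorsize):
 * pos = 5000 and write_bytes = 100 give start_pos = round_down(5000, 4096) =
 * 4096 and num_bytes = round_up(100 + 5000 - 4096, 4096) = 4096, so the
 * delalloc range covers exactly the single sector touched by the write.
 */
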
/*
 * This drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
							     + diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

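/*
 * A sketch of the two split cases handled above for an extent map that
 * straddles the dropped range [start, end]:
 *
 *	em:     |----------------------------|
 *	drop:           |--------|
 *	keep:   |split|            |-split2-|
 *
 * The head (em->start < start) survives as the first split, the tail
 * (em->start + em->len > start + len) as the second; the middle is dropped.
 */
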
/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range described by args->start and args->end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that.  We set the field 'bytes_found' of the arguments
 * structure with the number of allocated bytes found in the target range,
 * so that the caller can update the inode's number of bytes in an atomic
 * way when replacing extents in a range to avoid races with stat(2).
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = args->start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = args->start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;
	struct btrfs_path *path = args->path;

	args->bytes_found = 0;
	args->extent_inserted = false;

	/* Must always have a path if ->replace_extent is true. */
	ASSERT(!(args->replace_extent && !args->path));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (args->drop_cache)
		btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);

	if (args->start >= inode->disk_i_size && !args->replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* Can't happen. */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths.  They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition.  So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, args->start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += args->start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						args->start - extent_offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = args->start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (args->start <= key.offset && args->end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += args->end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += args->end - key.offset;
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += extent_end - args->start;
			if (args->end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				args->bytes_found += extent_end - key.offset;
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset);
				ret = btrfs_free_extent(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
				args->bytes_found += extent_end - key.offset;
			}

			if (args->end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete,
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leafs, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && args->replace_extent && leafs_visited == 1 &&
	    path->locks[0] == BTRFS_WRITE_LOCK &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + args->extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = args->start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &args->extent_item_size, 1);
		args->extent_inserted = true;
	}

	if (!args->path)
		btrfs_free_path(path);
	else if (!args->extent_inserted)
		btrfs_release_path(path);
out:
	args->drop_end = found ? min(args->end, last_end) : args->end;

	return ret;
}

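/*
 * A minimal usage sketch (hypothetical values): drop every extent in the
 * first mebibyte of an inode, letting the function allocate its own path.
 *
 *	struct btrfs_drop_extents_args args = { 0 };
 *
 *	args.start = 0;
 *	args.end = SZ_1M;
 *	args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &args);
 */
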
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

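/*
 * Note: extent_mergeable() only merges plain BTRFS_FILE_EXTENT_REG items
 * backed by the same physical extent (same disk_bytenr with a matching
 * offset) and with no compression, encryption or other encoding, so the
 * merge fast paths in btrfs_mark_extent_written() below are conservative.
 */
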
/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
1076int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
Nikolay Borisov7a6d7062017-02-20 13:50:48 +02001077 struct btrfs_inode *inode, u64 start, u64 end)
Yan Zhengd899e052008-10-30 14:25:28 -04001078{
David Sterba3ffbd682018-06-29 10:56:42 +02001079 struct btrfs_fs_info *fs_info = trans->fs_info;
Nikolay Borisov7a6d7062017-02-20 13:50:48 +02001080 struct btrfs_root *root = inode->root;
Yan Zhengd899e052008-10-30 14:25:28 -04001081 struct extent_buffer *leaf;
1082 struct btrfs_path *path;
1083 struct btrfs_file_extent_item *fi;
Qu Wenruo82fa1132019-04-04 14:45:35 +08001084 struct btrfs_ref ref = { 0 };
Yan Zhengd899e052008-10-30 14:25:28 -04001085 struct btrfs_key key;
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001086 struct btrfs_key new_key;
Yan Zhengd899e052008-10-30 14:25:28 -04001087 u64 bytenr;
1088 u64 num_bytes;
1089 u64 extent_end;
Yan Zheng5d4f98a2009-06-10 10:45:14 -04001090 u64 orig_offset;
Yan Zhengd899e052008-10-30 14:25:28 -04001091 u64 other_start;
1092 u64 other_end;
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001093 u64 split;
1094 int del_nr = 0;
1095 int del_slot = 0;
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001096 int recow;
Yan Zhengd899e052008-10-30 14:25:28 -04001097 int ret;
Nikolay Borisov7a6d7062017-02-20 13:50:48 +02001098 u64 ino = btrfs_ino(inode);
Yan Zhengd899e052008-10-30 14:25:28 -04001099
Yan Zhengd899e052008-10-30 14:25:28 -04001100 path = btrfs_alloc_path();
Mark Fashehd8926bb2011-07-13 10:38:47 -07001101 if (!path)
1102 return -ENOMEM;
Yan Zhengd899e052008-10-30 14:25:28 -04001103again:
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001104 recow = 0;
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001105 split = start;
Li Zefan33345d012011-04-20 10:31:50 +08001106 key.objectid = ino;
Yan Zhengd899e052008-10-30 14:25:28 -04001107 key.type = BTRFS_EXTENT_DATA_KEY;
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001108 key.offset = split;
Yan Zhengd899e052008-10-30 14:25:28 -04001109
1110 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
Josef Bacik41415732011-03-16 13:59:32 -04001111 if (ret < 0)
1112 goto out;
Yan Zhengd899e052008-10-30 14:25:28 -04001113 if (ret > 0 && path->slots[0] > 0)
1114 path->slots[0]--;
1115
1116 leaf = path->nodes[0];
1117 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
Josef Bacik9c8e63d2016-09-02 15:40:06 -04001118 if (key.objectid != ino ||
1119 key.type != BTRFS_EXTENT_DATA_KEY) {
1120 ret = -EINVAL;
1121 btrfs_abort_transaction(trans, ret);
1122 goto out;
1123 }
Yan Zhengd899e052008-10-30 14:25:28 -04001124 fi = btrfs_item_ptr(leaf, path->slots[0],
1125 struct btrfs_file_extent_item);
Josef Bacik9c8e63d2016-09-02 15:40:06 -04001126 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1127 ret = -EINVAL;
1128 btrfs_abort_transaction(trans, ret);
1129 goto out;
1130 }
Yan Zhengd899e052008-10-30 14:25:28 -04001131 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
Josef Bacik9c8e63d2016-09-02 15:40:06 -04001132 if (key.offset > start || extent_end < end) {
1133 ret = -EINVAL;
1134 btrfs_abort_transaction(trans, ret);
1135 goto out;
1136 }
Yan Zhengd899e052008-10-30 14:25:28 -04001137
1138 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1139 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
Yan Zheng5d4f98a2009-06-10 10:45:14 -04001140 orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001141 memcpy(&new_key, &key, sizeof(new_key));
1142
1143 if (start == key.offset && end < extent_end) {
1144 other_start = 0;
1145 other_end = start;
1146 if (extent_mergeable(leaf, path->slots[0] - 1,
Li Zefan33345d012011-04-20 10:31:50 +08001147 ino, bytenr, orig_offset,
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001148 &other_start, &other_end)) {
1149 new_key.offset = end;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001150 btrfs_set_item_key_safe(fs_info, path, &new_key);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001151 fi = btrfs_item_ptr(leaf, path->slots[0],
1152 struct btrfs_file_extent_item);
Josef Bacik224ecce2012-08-16 16:32:06 -04001153 btrfs_set_file_extent_generation(leaf, fi,
1154 trans->transid);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001155 btrfs_set_file_extent_num_bytes(leaf, fi,
1156 extent_end - end);
1157 btrfs_set_file_extent_offset(leaf, fi,
1158 end - orig_offset);
1159 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1160 struct btrfs_file_extent_item);
Josef Bacik224ecce2012-08-16 16:32:06 -04001161 btrfs_set_file_extent_generation(leaf, fi,
1162 trans->transid);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001163 btrfs_set_file_extent_num_bytes(leaf, fi,
1164 end - other_start);
1165 btrfs_mark_buffer_dirty(leaf);
1166 goto out;
1167 }
1168 }
1169
1170 if (start > key.offset && end == extent_end) {
1171 other_start = end;
1172 other_end = 0;
1173 if (extent_mergeable(leaf, path->slots[0] + 1,
Li Zefan33345d012011-04-20 10:31:50 +08001174 ino, bytenr, orig_offset,
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001175 &other_start, &other_end)) {
1176 fi = btrfs_item_ptr(leaf, path->slots[0],
1177 struct btrfs_file_extent_item);
1178 btrfs_set_file_extent_num_bytes(leaf, fi,
1179 start - key.offset);
Josef Bacik224ecce2012-08-16 16:32:06 -04001180 btrfs_set_file_extent_generation(leaf, fi,
1181 trans->transid);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001182 path->slots[0]++;
1183 new_key.offset = start;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001184 btrfs_set_item_key_safe(fs_info, path, &new_key);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001185
1186 fi = btrfs_item_ptr(leaf, path->slots[0],
1187 struct btrfs_file_extent_item);
Josef Bacik224ecce2012-08-16 16:32:06 -04001188 btrfs_set_file_extent_generation(leaf, fi,
1189 trans->transid);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001190 btrfs_set_file_extent_num_bytes(leaf, fi,
1191 other_end - start);
1192 btrfs_set_file_extent_offset(leaf, fi,
1193 start - orig_offset);
1194 btrfs_mark_buffer_dirty(leaf);
1195 goto out;
1196 }
1197 }
Yan Zhengd899e052008-10-30 14:25:28 -04001198
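	/*
	 * The extent covering [start, end) is larger than the range being
	 * marked written, so split it.  The loop below runs at most twice:
	 * it first cuts the item at start (when the extent begins before
	 * start) and then at end (when it extends past end).  Each
	 * btrfs_duplicate_item() leaves two items pointing into the same
	 * physical extent, so a reference is added for the new one.
	 */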
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001199 while (start > key.offset || end < extent_end) {
1200 if (key.offset == start)
1201 split = end;
Yan Zhengd899e052008-10-30 14:25:28 -04001202
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001203 new_key.offset = split;
1204 ret = btrfs_duplicate_item(trans, root, path, &new_key);
1205 if (ret == -EAGAIN) {
David Sterbab3b4aa72011-04-21 01:20:15 +02001206 btrfs_release_path(path);
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001207 goto again;
Yan Zhengd899e052008-10-30 14:25:28 -04001208 }
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001209 if (ret < 0) {
Jeff Mahoney66642832016-06-10 18:19:25 -04001210 btrfs_abort_transaction(trans, ret);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001211 goto out;
1212 }
Yan Zhengd899e052008-10-30 14:25:28 -04001213
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001214 leaf = path->nodes[0];
1215 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
Yan Zhengd899e052008-10-30 14:25:28 -04001216 struct btrfs_file_extent_item);
Josef Bacik224ecce2012-08-16 16:32:06 -04001217 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
Yan Zhengd899e052008-10-30 14:25:28 -04001218 btrfs_set_file_extent_num_bytes(leaf, fi,
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001219 split - key.offset);
1220
1221 fi = btrfs_item_ptr(leaf, path->slots[0],
1222 struct btrfs_file_extent_item);
1223
Josef Bacik224ecce2012-08-16 16:32:06 -04001224 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001225 btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1226 btrfs_set_file_extent_num_bytes(leaf, fi,
1227 extent_end - split);
Yan Zhengd899e052008-10-30 14:25:28 -04001228 btrfs_mark_buffer_dirty(leaf);
1229
Qu Wenruo82fa1132019-04-04 14:45:35 +08001230 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1231 num_bytes, 0);
1232 btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1233 orig_offset);
1234 ret = btrfs_inc_extent_ref(trans, &ref);
Josef Bacik9c8e63d2016-09-02 15:40:06 -04001235 if (ret) {
1236 btrfs_abort_transaction(trans, ret);
1237 goto out;
1238 }
Yan Zhengd899e052008-10-30 14:25:28 -04001239
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001240 if (split == start) {
1241 key.offset = start;
1242 } else {
Josef Bacik9c8e63d2016-09-02 15:40:06 -04001243 if (start != key.offset) {
1244 ret = -EINVAL;
1245 btrfs_abort_transaction(trans, ret);
1246 goto out;
1247 }
Yan Zhengd899e052008-10-30 14:25:28 -04001248 path->slots[0]--;
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001249 extent_end = end;
Yan Zhengd899e052008-10-30 14:25:28 -04001250 }
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001251 recow = 1;
Yan Zhengd899e052008-10-30 14:25:28 -04001252 }
1253
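	/*
	 * The item at path->slots[0] now covers exactly [start, end).
	 * Try to merge it with the extents right after and right before
	 * it: extent_mergeable() only matches REG extents backed by the
	 * same physical extent, and each neighbour merged away has one
	 * reference dropped here and its item deleted below.
	 */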
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001254 other_start = end;
1255 other_end = 0;
Qu Wenruoffd4bb22019-04-04 14:45:36 +08001256 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1257 num_bytes, 0);
1258 btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001259 if (extent_mergeable(leaf, path->slots[0] + 1,
Li Zefan33345d012011-04-20 10:31:50 +08001260 ino, bytenr, orig_offset,
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001261 &other_start, &other_end)) {
1262 if (recow) {
David Sterbab3b4aa72011-04-21 01:20:15 +02001263 btrfs_release_path(path);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001264 goto again;
1265 }
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001266 extent_end = other_end;
1267 del_slot = path->slots[0] + 1;
1268 del_nr++;
Qu Wenruoffd4bb22019-04-04 14:45:36 +08001269 ret = btrfs_free_extent(trans, &ref);
Josef Bacik9c8e63d2016-09-02 15:40:06 -04001270 if (ret) {
1271 btrfs_abort_transaction(trans, ret);
1272 goto out;
1273 }
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001274 }
1275 other_start = 0;
1276 other_end = start;
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001277 if (extent_mergeable(leaf, path->slots[0] - 1,
Li Zefan33345d012011-04-20 10:31:50 +08001278 ino, bytenr, orig_offset,
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001279 &other_start, &other_end)) {
1280 if (recow) {
David Sterbab3b4aa72011-04-21 01:20:15 +02001281 btrfs_release_path(path);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001282 goto again;
1283 }
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001284 key.offset = other_start;
1285 del_slot = path->slots[0];
1286 del_nr++;
Qu Wenruoffd4bb22019-04-04 14:45:36 +08001287 ret = btrfs_free_extent(trans, &ref);
Josef Bacik9c8e63d2016-09-02 15:40:06 -04001288 if (ret) {
1289 btrfs_abort_transaction(trans, ret);
1290 goto out;
1291 }
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001292 }
1293 if (del_nr == 0) {
Shaohua Li3f6fae92010-02-11 07:43:00 +00001294 fi = btrfs_item_ptr(leaf, path->slots[0],
1295 struct btrfs_file_extent_item);
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001296 btrfs_set_file_extent_type(leaf, fi,
1297 BTRFS_FILE_EXTENT_REG);
Josef Bacik224ecce2012-08-16 16:32:06 -04001298 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001299 btrfs_mark_buffer_dirty(leaf);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001300 } else {
Shaohua Li3f6fae92010-02-11 07:43:00 +00001301 fi = btrfs_item_ptr(leaf, del_slot - 1,
1302 struct btrfs_file_extent_item);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001303 btrfs_set_file_extent_type(leaf, fi,
1304 BTRFS_FILE_EXTENT_REG);
Josef Bacik224ecce2012-08-16 16:32:06 -04001305 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
Yan, Zheng6c7d54a2010-01-15 08:43:09 +00001306 btrfs_set_file_extent_num_bytes(leaf, fi,
1307 extent_end - key.offset);
1308 btrfs_mark_buffer_dirty(leaf);
1309
1310 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001311 if (ret < 0) {
Jeff Mahoney66642832016-06-10 18:19:25 -04001312 btrfs_abort_transaction(trans, ret);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001313 goto out;
1314 }
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001315 }
Yan, Zheng920bbbf2009-11-12 09:34:08 +00001316out:
Yan Zhengd899e052008-10-30 14:25:28 -04001317 btrfs_free_path(path);
1318	return ret;
1319}
1320
Chris Mason39279cc2007-06-12 06:35:45 -04001321/*
Chris Masonb1bf8622011-02-28 09:52:08 -05001322 * on error we return an unlocked page and the error value;
1323 * on success we return a locked page and 0
1324 */
Chris Masonbb1591b42015-12-14 15:40:44 -08001325static int prepare_uptodate_page(struct inode *inode,
1326 struct page *page, u64 pos,
Josef Bacikb63164292011-09-30 15:23:54 -04001327 bool force_uptodate)
Chris Masonb1bf8622011-02-28 09:52:08 -05001328{
1329 int ret = 0;
1330
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001331 if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
Josef Bacikb63164292011-09-30 15:23:54 -04001332 !PageUptodate(page)) {
Chris Masonb1bf8622011-02-28 09:52:08 -05001333 ret = btrfs_readpage(NULL, page);
1334 if (ret)
1335 return ret;
1336 lock_page(page);
1337 if (!PageUptodate(page)) {
1338 unlock_page(page);
1339 return -EIO;
1340 }
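		/*
		 * The page was unlocked while btrfs_readpage() ran, so it
		 * may have been truncated or otherwise removed from this
		 * mapping in the meantime; return -EAGAIN so the caller
		 * grabs and prepares a fresh page.
		 */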
Chris Masonbb1591b42015-12-14 15:40:44 -08001341 if (page->mapping != inode->i_mapping) {
1342 unlock_page(page);
1343 return -EAGAIN;
1344 }
Chris Masonb1bf8622011-02-28 09:52:08 -05001345 }
1346 return 0;
1347}
1348
1349/*
Miao Xie376cc682013-12-10 19:25:04 +08001350 * this just gets pages into the page cache and locks them down.
Chris Mason39279cc2007-06-12 06:35:45 -04001351 */
Miao Xieb37392e2013-12-10 19:25:03 +08001352static noinline int prepare_pages(struct inode *inode, struct page **pages,
1353 size_t num_pages, loff_t pos,
1354 size_t write_bytes, bool force_uptodate)
Chris Mason39279cc2007-06-12 06:35:45 -04001355{
1356 int i;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001357 unsigned long index = pos >> PAGE_SHIFT;
Josef Bacik3b16a4e2011-09-21 15:05:58 -04001358 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
Filipe David Borba Mananafc28b622013-12-13 19:39:34 +00001359 int err = 0;
Miao Xie376cc682013-12-10 19:25:04 +08001360 int faili;
Chris Mason8c2383c2007-06-18 09:57:58 -04001361
Chris Mason39279cc2007-06-12 06:35:45 -04001362 for (i = 0; i < num_pages; i++) {
Chris Masonbb1591b42015-12-14 15:40:44 -08001363again:
Josef Bacika94733d2011-07-11 10:47:06 -04001364 pages[i] = find_or_create_page(inode->i_mapping, index + i,
Johannes Weinere3a41a52012-01-10 15:07:55 -08001365 mask | __GFP_WRITE);
Chris Mason39279cc2007-06-12 06:35:45 -04001366 if (!pages[i]) {
Chris Masonb1bf8622011-02-28 09:52:08 -05001367 faili = i - 1;
1368 err = -ENOMEM;
1369 goto fail;
1370 }
1371
Qu Wenruo32443de2021-01-26 16:34:00 +08001372 err = set_page_extent_mapped(pages[i]);
1373 if (err < 0) {
1374 faili = i;
1375 goto fail;
1376 }
1377
Chris Masonb1bf8622011-02-28 09:52:08 -05001378 if (i == 0)
Chris Masonbb1591b42015-12-14 15:40:44 -08001379 err = prepare_uptodate_page(inode, pages[i], pos,
Josef Bacikb63164292011-09-30 15:23:54 -04001380 force_uptodate);
Chris Masonbb1591b42015-12-14 15:40:44 -08001381 if (!err && i == num_pages - 1)
1382 err = prepare_uptodate_page(inode, pages[i],
Josef Bacikb63164292011-09-30 15:23:54 -04001383 pos + write_bytes, false);
Chris Masonb1bf8622011-02-28 09:52:08 -05001384 if (err) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001385 put_page(pages[i]);
Chris Masonbb1591b42015-12-14 15:40:44 -08001386 if (err == -EAGAIN) {
1387 err = 0;
1388 goto again;
1389 }
Chris Masonb1bf8622011-02-28 09:52:08 -05001390 faili = i - 1;
1391 goto fail;
Chris Mason39279cc2007-06-12 06:35:45 -04001392 }
Chris Masonccd467d2007-06-28 15:57:36 -04001393 wait_on_page_writeback(pages[i]);
Chris Mason39279cc2007-06-12 06:35:45 -04001394 }
Chris Masone6dcd2d2008-07-17 12:53:50 -04001395
Chris Mason39279cc2007-06-12 06:35:45 -04001396 return 0;
Chris Masonb1bf8622011-02-28 09:52:08 -05001397fail:
1398 while (faili >= 0) {
1399 unlock_page(pages[faili]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001400 put_page(pages[faili]);
Chris Masonb1bf8622011-02-28 09:52:08 -05001401 faili--;
1402 }
1403 return err;
1404
Chris Mason39279cc2007-06-12 06:35:45 -04001405}
1406
Miao Xie376cc682013-12-10 19:25:04 +08001407/*
1408 * This function locks the extent and properly waits for data=ordered extents
1409 * to finish before allowing the pages to be modified if needed.
1410 *
1411 * The return value:
1412 * 1 - the extent is locked
1413 * 0 - the extent is not locked, and everything is OK
1414 * -EAGAIN - the pages need to be re-prepared
1415 * any other value < 0 - something went wrong
1416 */
1417static noinline int
Nikolay Borisov2cff578c2017-02-20 13:50:51 +02001418lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
Miao Xie376cc682013-12-10 19:25:04 +08001419 size_t num_pages, loff_t pos,
Chandan Rajendra2e78c922016-01-21 15:55:53 +05301420 size_t write_bytes,
Miao Xie376cc682013-12-10 19:25:04 +08001421 u64 *lockstart, u64 *lockend,
1422 struct extent_state **cached_state)
1423{
David Sterba3ffbd682018-06-29 10:56:42 +02001424 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Miao Xie376cc682013-12-10 19:25:04 +08001425 u64 start_pos;
1426 u64 last_pos;
1427 int i;
1428 int ret = 0;
1429
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001430 start_pos = round_down(pos, fs_info->sectorsize);
Qu Wenruoe21139c2020-08-13 14:33:52 +08001431 last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
Miao Xie376cc682013-12-10 19:25:04 +08001432
Filipe Mananae3b8a482017-11-04 00:16:59 +00001433 if (start_pos < inode->vfs_inode.i_size) {
Miao Xie376cc682013-12-10 19:25:04 +08001434 struct btrfs_ordered_extent *ordered;
Filipe Mananaa7e3b972017-04-03 10:45:46 +01001435
Nikolay Borisov2cff578c2017-02-20 13:50:51 +02001436 lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1437 cached_state);
Miao Xieb88935b2014-03-06 13:54:58 +08001438 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1439 last_pos - start_pos + 1);
Miao Xie376cc682013-12-10 19:25:04 +08001440 if (ordered &&
Omar Sandovalbffe6332019-12-02 17:34:19 -08001441 ordered->file_offset + ordered->num_bytes > start_pos &&
Miao Xie376cc682013-12-10 19:25:04 +08001442 ordered->file_offset <= last_pos) {
Nikolay Borisov2cff578c2017-02-20 13:50:51 +02001443 unlock_extent_cached(&inode->io_tree, start_pos,
David Sterbae43bbe52017-12-12 21:43:52 +01001444 last_pos, cached_state);
Miao Xie376cc682013-12-10 19:25:04 +08001445 for (i = 0; i < num_pages; i++) {
1446 unlock_page(pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001447 put_page(pages[i]);
Miao Xie376cc682013-12-10 19:25:04 +08001448 }
Nikolay Borisovc0a43602020-09-18 12:15:53 +03001449 btrfs_start_ordered_extent(ordered, 1);
Miao Xieb88935b2014-03-06 13:54:58 +08001450 btrfs_put_ordered_extent(ordered);
1451 return -EAGAIN;
Miao Xie376cc682013-12-10 19:25:04 +08001452 }
1453 if (ordered)
1454 btrfs_put_ordered_extent(ordered);
Chris Mason7703bdd2018-06-20 07:56:11 -07001455
Miao Xie376cc682013-12-10 19:25:04 +08001456 *lockstart = start_pos;
1457 *lockend = last_pos;
1458 ret = 1;
1459 }
1460
Chris Mason7703bdd2018-06-20 07:56:11 -07001461 /*
Qu Wenruo32443de2021-01-26 16:34:00 +08001462 * We should be called after prepare_pages() which should have locked
1463 * all pages in the range.
Chris Mason7703bdd2018-06-20 07:56:11 -07001464 */
Qu Wenruo32443de2021-01-26 16:34:00 +08001465 for (i = 0; i < num_pages; i++)
Miao Xie376cc682013-12-10 19:25:04 +08001466 WARN_ON(!PageLocked(pages[i]));
Miao Xie376cc682013-12-10 19:25:04 +08001467
1468 return ret;
1469}
1470
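/*
 * A minimal sketch of how the two helpers above cooperate, assuming the
 * usual caller locals (pages, pos, write_bytes, ...) and with the space
 * reservation handling omitted; the buffered write loop further below is
 * the real user:
 */
#if 0
again:
	ret = prepare_pages(inode, pages, num_pages, pos, write_bytes,
			    force_page_uptodate);
	if (ret)
		goto fail;
	extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode),
			pages, num_pages, pos, write_bytes, &lockstart,
			&lockend, &cached_state);
	if (extents_locked == -EAGAIN)
		goto again;	/* the pages were released, prepare them again */
	if (extents_locked < 0)
		goto fail;
	/* ... copy the data into the pages ... */
	if (extents_locked)
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state);
#endif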
Qu Wenruo38d37aa2020-06-24 07:23:52 +08001471static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1472 size_t *write_bytes, bool nowait)
Josef Bacik7ee9e442013-06-21 16:37:03 -04001473{
David Sterba3ffbd682018-06-29 10:56:42 +02001474 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Nikolay Borisov85b7ab62017-02-20 13:50:50 +02001475 struct btrfs_root *root = inode->root;
Josef Bacik7ee9e442013-06-21 16:37:03 -04001476 u64 lockstart, lockend;
1477 u64 num_bytes;
1478 int ret;
1479
Qu Wenruo38d37aa2020-06-24 07:23:52 +08001480 if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1481 return 0;
1482
Filipe Manana5dbb75e2020-06-15 18:49:39 +01001483 if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
Nikolay Borisov5f791ec2019-05-07 10:23:46 +03001484 return -EAGAIN;
Miao Xie8257b2d2014-03-06 13:38:19 +08001485
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001486 lockstart = round_down(pos, fs_info->sectorsize);
Jeff Mahoneyda170662016-06-15 09:22:56 -04001487 lockend = round_up(pos + *write_bytes,
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001488 fs_info->sectorsize) - 1;
Josef Bacik7ee9e442013-06-21 16:37:03 -04001489 num_bytes = lockend - lockstart + 1;
Filipe Manana5dbb75e2020-06-15 18:49:39 +01001490
1491 if (nowait) {
1492 struct btrfs_ordered_extent *ordered;
1493
1494 if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
1495 return -EAGAIN;
1496
1497 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1498 num_bytes);
1499 if (ordered) {
1500 btrfs_put_ordered_extent(ordered);
1501 ret = -EAGAIN;
1502 goto out_unlock;
1503 }
1504 } else {
1505 btrfs_lock_and_flush_ordered_range(inode, lockstart,
1506 lockend, NULL);
1507 }
1508
Nikolay Borisov85b7ab62017-02-20 13:50:50 +02001509 ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
Boris Burkova84d5d42020-08-18 11:00:05 -07001510 NULL, NULL, NULL, false);
Josef Bacik7ee9e442013-06-21 16:37:03 -04001511 if (ret <= 0) {
1512 ret = 0;
Filipe Manana5dbb75e2020-06-15 18:49:39 +01001513 if (!nowait)
1514 btrfs_drew_write_unlock(&root->snapshot_lock);
Josef Bacik7ee9e442013-06-21 16:37:03 -04001515 } else {
Miao Xiec9339562014-02-27 13:58:04 +08001516		*write_bytes = min_t(size_t, *write_bytes,
1517 num_bytes - pos + lockstart);
Josef Bacik7ee9e442013-06-21 16:37:03 -04001518 }
Filipe Manana5dbb75e2020-06-15 18:49:39 +01001519out_unlock:
Nikolay Borisov85b7ab62017-02-20 13:50:50 +02001520 unlock_extent(&inode->io_tree, lockstart, lockend);
Josef Bacik7ee9e442013-06-21 16:37:03 -04001521
1522 return ret;
1523}
1524
Qu Wenruo38d37aa2020-06-24 07:23:52 +08001525static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
1526 size_t *write_bytes)
1527{
1528 return check_can_nocow(inode, pos, write_bytes, true);
1529}
1530
1531/*
1532 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1533 *
1534 * @pos: File offset
1535 * @write_bytes: The length to write, will be updated to the nocow writeable
1536 * range
1537 *
1538 * This function will flush ordered extents in the range to ensure proper
1539 * nocow checks.
1540 *
1541 * Return:
1542 * >0 and update @write_bytes if we can do nocow write
1543 * 0 if we can't do nocow write
1544 * -EAGAIN if we can't get the needed lock or there are ordered extents
1545 * for the (nowait == true) case
1546 * <0 if other error happened
1547 *
1548 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
1549 */
1550int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1551 size_t *write_bytes)
1552{
1553 return check_can_nocow(inode, pos, write_bytes, false);
1554}
1555
1556void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1557{
1558 btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1559}
1560
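/*
 * A minimal sketch of the expected pairing of the nocow helpers above;
 * do_the_write() is a hypothetical placeholder, and on success
 * *write_bytes may have been shrunk to the subrange that is writable
 * without COW:
 */
#if 0
	size_t write_bytes = count;

	if (btrfs_check_nocow_lock(BTRFS_I(inode), pos, &write_bytes) > 0) {
		do_the_write();		/* hypothetical placeholder */
		btrfs_check_nocow_unlock(BTRFS_I(inode));
	}
#endif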
Goldwyn Rodriguesb8d8e1f2020-09-24 11:39:15 -05001561static void update_time_for_write(struct inode *inode)
1562{
1563 struct timespec64 now;
1564
1565 if (IS_NOCMTIME(inode))
1566 return;
1567
1568 now = current_time(inode);
1569 if (!timespec64_equal(&inode->i_mtime, &now))
1570 inode->i_mtime = now;
1571
1572 if (!timespec64_equal(&inode->i_ctime, &now))
1573 inode->i_ctime = now;
1574
1575 if (IS_I_VERSION(inode))
1576 inode_inc_iversion(inode);
1577}
1578
1579static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1580 size_t count)
1581{
1582 struct file *file = iocb->ki_filp;
1583 struct inode *inode = file_inode(file);
1584 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1585 loff_t pos = iocb->ki_pos;
1586 int ret;
1587 loff_t oldsize;
1588 loff_t start_pos;
1589
1590 if (iocb->ki_flags & IOCB_NOWAIT) {
1591 size_t nocow_bytes = count;
1592
1593 /* We will allocate space in case nodatacow is not set, so bail */
1594 if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
1595 return -EAGAIN;
1596 /*
1597 * There are holes in the range or parts of the range that must
1598 * be COWed (shared extents, RO block groups, etc), so just bail
1599 * out.
1600 */
1601 if (nocow_bytes < count)
1602 return -EAGAIN;
1603 }
1604
1605 current->backing_dev_info = inode_to_bdi(inode);
1606 ret = file_remove_privs(file);
1607 if (ret)
1608 return ret;
1609
1610 /*
1611 * We reserve space for updating the inode when we reserve space for the
1612 * extent we are going to write, so we will enospc out there. We don't
1613 * need to start yet another transaction to update the inode as we will
1614 * update the inode when we finish writing whatever data we write.
1615 */
1616 update_time_for_write(inode);
1617
1618 start_pos = round_down(pos, fs_info->sectorsize);
1619 oldsize = i_size_read(inode);
1620 if (start_pos > oldsize) {
1621 /* Expand hole size to cover write data, preventing empty gap */
1622 loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1623
Nikolay Borisovb06359a2020-11-02 16:49:04 +02001624 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
Goldwyn Rodriguesb8d8e1f2020-09-24 11:39:15 -05001625 if (ret) {
1626 current->backing_dev_info = NULL;
1627 return ret;
1628 }
1629 }
1630
1631 return 0;
1632}
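
/*
 * btrfs_write_check() is shared by the buffered and the direct write
 * paths below; both call it right after generic_write_checks(), passing
 * in the byte count those checks allowed.
 */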
1633
Goldwyn Rodriguese4af4002018-06-17 12:39:47 -05001634static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1635 struct iov_iter *i)
Josef Bacik4b46fce2010-05-23 11:00:55 -04001636{
Goldwyn Rodriguese4af4002018-06-17 12:39:47 -05001637 struct file *file = iocb->ki_filp;
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001638 loff_t pos;
Al Viro496ad9a2013-01-23 17:07:38 -05001639 struct inode *inode = file_inode(file);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001640 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Josef Bacik11c65dc2010-05-23 11:07:21 -04001641 struct page **pages = NULL;
Qu Wenruo364ecf32017-02-27 15:10:38 +08001642 struct extent_changeset *data_reserved = NULL;
Josef Bacik7ee9e442013-06-21 16:37:03 -04001643 u64 release_bytes = 0;
Miao Xie376cc682013-12-10 19:25:04 +08001644 u64 lockstart;
1645 u64 lockend;
Josef Bacikd0215f32011-01-25 14:57:24 -05001646 size_t num_written = 0;
1647 int nrptrs;
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001648 ssize_t ret;
Josef Bacik7ee9e442013-06-21 16:37:03 -04001649 bool only_release_metadata = false;
Josef Bacikb63164292011-09-30 15:23:54 -04001650 bool force_page_uptodate = false;
Goldwyn Rodrigues5e8b9ef2020-09-24 11:39:13 -05001651 loff_t old_isize = i_size_read(inode);
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001652 unsigned int ilock_flags = 0;
Chris Masoncb843a62008-10-03 12:30:02 -04001653
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001654 if (iocb->ki_flags & IOCB_NOWAIT)
1655 ilock_flags |= BTRFS_ILOCK_TRY;
1656
1657 ret = btrfs_inode_lock(inode, ilock_flags);
1658 if (ret < 0)
1659 return ret;
1660
1661 ret = generic_write_checks(iocb, i);
1662 if (ret <= 0)
1663 goto out;
1664
1665 ret = btrfs_write_check(iocb, i, ret);
1666 if (ret < 0)
1667 goto out;
1668
1669 pos = iocb->ki_pos;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001670 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1671 PAGE_SIZE / (sizeof(struct page *)));
Wu Fengguang142349f2011-12-16 12:32:57 -05001672 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1673 nrptrs = max(nrptrs, 8);
David Sterba31e818f2015-02-20 18:00:26 +01001674 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001675 if (!pages) {
1676 ret = -ENOMEM;
1677 goto out;
1678 }
Chris Masonab93dbe2009-10-01 12:29:10 -04001679
Josef Bacikd0215f32011-01-25 14:57:24 -05001680 while (iov_iter_count(i) > 0) {
Filipe Mananac67d9702019-09-30 10:20:25 +01001681 struct extent_state *cached_state = NULL;
Johannes Thumshirn70730172018-12-05 15:23:03 +01001682 size_t offset = offset_in_page(pos);
Chandan Rajendra2e78c922016-01-21 15:55:53 +05301683 size_t sector_offset;
Josef Bacikd0215f32011-01-25 14:57:24 -05001684 size_t write_bytes = min(iov_iter_count(i),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001685 nrptrs * (size_t)PAGE_SIZE -
Chris Mason8c2383c2007-06-18 09:57:58 -04001686 offset);
Goldwyn Rodrigueseefa45f52020-09-25 15:36:38 -05001687 size_t num_pages;
Josef Bacik7ee9e442013-06-21 16:37:03 -04001688 size_t reserve_bytes;
Josef Bacikd0215f32011-01-25 14:57:24 -05001689 size_t dirty_pages;
1690 size_t copied;
Chandan Rajendra2e78c922016-01-21 15:55:53 +05301691 size_t dirty_sectors;
1692 size_t num_sectors;
Goldwyn Rodrigues79f015f2017-10-16 05:43:21 -05001693 int extents_locked;
Chris Mason39279cc2007-06-12 06:35:45 -04001694
Xin Zhong914ee292010-12-09 09:30:14 +00001695 /*
1696 * Fault pages before locking them in prepare_pages
1697 * to avoid a recursive lock
1698 */
Josef Bacikd0215f32011-01-25 14:57:24 -05001699 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
Xin Zhong914ee292010-12-09 09:30:14 +00001700 ret = -EFAULT;
Josef Bacikd0215f32011-01-25 14:57:24 -05001701 break;
Xin Zhong914ee292010-12-09 09:30:14 +00001702 }
1703
Filipe Mananaa0e248b2019-10-11 16:41:20 +01001704 only_release_metadata = false;
Jeff Mahoneyda170662016-06-15 09:22:56 -04001705 sector_offset = pos & (fs_info->sectorsize - 1);
Qu Wenruod9d8b2a2015-09-08 17:22:43 +08001706
Qu Wenruo364ecf32017-02-27 15:10:38 +08001707 extent_changeset_release(data_reserved);
Nikolay Borisov36ea6f32020-06-03 08:55:41 +03001708 ret = btrfs_check_data_free_space(BTRFS_I(inode),
1709 &data_reserved, pos,
Qu Wenruo364ecf32017-02-27 15:10:38 +08001710 write_bytes);
Josef Bacikc6887cd2016-03-25 13:26:00 -04001711 if (ret < 0) {
Goldwyn Rodrigueseefa45f52020-09-25 15:36:38 -05001712 /*
1713 * If we don't have to COW at the offset, reserve
1714 * metadata only. write_bytes may get smaller than
1715 * requested here.
1716 */
Qu Wenruo38d37aa2020-06-24 07:23:52 +08001717 if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
Goldwyn Rodrigueseefa45f52020-09-25 15:36:38 -05001718 &write_bytes) > 0)
Josef Bacikc6887cd2016-03-25 13:26:00 -04001719 only_release_metadata = true;
Goldwyn Rodrigueseefa45f52020-09-25 15:36:38 -05001720 else
Josef Bacikc6887cd2016-03-25 13:26:00 -04001721 break;
Josef Bacik7ee9e442013-06-21 16:37:03 -04001722 }
Zhao Lei4da2e262016-01-06 18:24:43 +08001723
Goldwyn Rodrigueseefa45f52020-09-25 15:36:38 -05001724 num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1725 WARN_ON(num_pages > nrptrs);
1726 reserve_bytes = round_up(write_bytes + sector_offset,
1727 fs_info->sectorsize);
Josef Bacik8b62f872017-10-19 14:15:55 -04001728 WARN_ON(reserve_bytes == 0);
Nikolay Borisov9f3db422017-02-20 13:50:41 +02001729 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1730 reserve_bytes);
Josef Bacik7ee9e442013-06-21 16:37:03 -04001731 if (ret) {
1732 if (!only_release_metadata)
Nikolay Borisov25ce28c2020-06-03 08:55:39 +03001733 btrfs_free_reserved_data_space(BTRFS_I(inode),
Qu Wenruobc42bda2017-02-27 15:10:39 +08001734 data_reserved, pos,
1735 write_bytes);
Miao Xie8257b2d2014-03-06 13:38:19 +08001736 else
Qu Wenruo38d37aa2020-06-24 07:23:52 +08001737 btrfs_check_nocow_unlock(BTRFS_I(inode));
Josef Bacik7ee9e442013-06-21 16:37:03 -04001738 break;
1739 }
1740
1741 release_bytes = reserve_bytes;
Miao Xie376cc682013-12-10 19:25:04 +08001742again:
Josef Bacik4a640012011-01-25 15:10:08 -05001743 /*
1744 * This is going to setup the pages array with the number of
1745 * pages we want, so we don't really need to worry about the
1746 * contents of pages from loop to loop
1747 */
Miao Xieb37392e2013-12-10 19:25:03 +08001748 ret = prepare_pages(inode, pages, num_pages,
1749 pos, write_bytes,
Josef Bacikb63164292011-09-30 15:23:54 -04001750 force_page_uptodate);
Josef Bacik8b62f872017-10-19 14:15:55 -04001751 if (ret) {
1752 btrfs_delalloc_release_extents(BTRFS_I(inode),
Qu Wenruo8702ba92019-10-14 14:34:51 +08001753 reserve_bytes);
Josef Bacikd0215f32011-01-25 14:57:24 -05001754 break;
Josef Bacik8b62f872017-10-19 14:15:55 -04001755 }
Chris Mason39279cc2007-06-12 06:35:45 -04001756
Goldwyn Rodrigues79f015f2017-10-16 05:43:21 -05001757 extents_locked = lock_and_cleanup_extent_if_need(
1758 BTRFS_I(inode), pages,
Nikolay Borisov2cff578c2017-02-20 13:50:51 +02001759 num_pages, pos, write_bytes, &lockstart,
1760 &lockend, &cached_state);
Goldwyn Rodrigues79f015f2017-10-16 05:43:21 -05001761 if (extents_locked < 0) {
1762 if (extents_locked == -EAGAIN)
Miao Xie376cc682013-12-10 19:25:04 +08001763 goto again;
Josef Bacik8b62f872017-10-19 14:15:55 -04001764 btrfs_delalloc_release_extents(BTRFS_I(inode),
Qu Wenruo8702ba92019-10-14 14:34:51 +08001765 reserve_bytes);
Goldwyn Rodrigues79f015f2017-10-16 05:43:21 -05001766 ret = extents_locked;
Miao Xie376cc682013-12-10 19:25:04 +08001767 break;
Miao Xie376cc682013-12-10 19:25:04 +08001768 }
1769
Zhao Leiee22f0c2016-01-06 18:47:31 +08001770 copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
Chris Masonb1bf8622011-02-28 09:52:08 -05001771
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001772 num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
Chris Mason56244ef2016-05-16 09:21:01 -07001773 dirty_sectors = round_up(copied + sector_offset,
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001774 fs_info->sectorsize);
1775 dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
Chris Mason56244ef2016-05-16 09:21:01 -07001776
Chris Masonb1bf8622011-02-28 09:52:08 -05001777 /*
1778 * if we have trouble faulting in the pages, fall
1779 * back to one page at a time
1780 */
1781 if (copied < write_bytes)
1782 nrptrs = 1;
1783
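		/*
		 * If the copy came up completely empty, retry this iteration
		 * with the first page forced uptodate (prepare_uptodate_page()
		 * will then read it in full) so the next attempt works on a
		 * fully populated page instead of spinning on a partial one.
		 */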
Josef Bacikb63164292011-09-30 15:23:54 -04001784 if (copied == 0) {
1785 force_page_uptodate = true;
Chris Mason56244ef2016-05-16 09:21:01 -07001786 dirty_sectors = 0;
Chris Masonb1bf8622011-02-28 09:52:08 -05001787 dirty_pages = 0;
Josef Bacikb63164292011-09-30 15:23:54 -04001788 } else {
1789 force_page_uptodate = false;
David Sterbaed6078f2014-06-05 01:59:57 +02001790 dirty_pages = DIV_ROUND_UP(copied + offset,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001791 PAGE_SIZE);
Josef Bacikb63164292011-09-30 15:23:54 -04001792 }
Xin Zhong914ee292010-12-09 09:30:14 +00001793
Chandan Rajendra2e78c922016-01-21 15:55:53 +05301794 if (num_sectors > dirty_sectors) {
Chris Mason8b8b08c2016-07-19 05:52:36 -07001795 /* release everything except the sectors we dirtied */
David Sterba265fdfa2020-07-01 21:19:09 +02001796 release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
Qu Wenruo485290a2015-10-29 17:28:46 +08001797 if (only_release_metadata) {
Nikolay Borisov691fa052017-02-20 13:50:42 +02001798 btrfs_delalloc_release_metadata(BTRFS_I(inode),
Qu Wenruo43b18592017-12-12 15:34:32 +08001799 release_bytes, true);
Qu Wenruo485290a2015-10-29 17:28:46 +08001800 } else {
1801 u64 __pos;
1802
Jeff Mahoneyda170662016-06-15 09:22:56 -04001803 __pos = round_down(pos,
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001804 fs_info->sectorsize) +
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001805 (dirty_pages << PAGE_SHIFT);
Nikolay Borisov86d52922020-06-03 08:55:40 +03001806 btrfs_delalloc_release_space(BTRFS_I(inode),
Qu Wenruobc42bda2017-02-27 15:10:39 +08001807 data_reserved, __pos,
Qu Wenruo43b18592017-12-12 15:34:32 +08001808 release_bytes, true);
Qu Wenruo485290a2015-10-29 17:28:46 +08001809 }
Xin Zhong914ee292010-12-09 09:30:14 +00001810 }
1811
Chandan Rajendra2e78c922016-01-21 15:55:53 +05301812 release_bytes = round_up(copied + sector_offset,
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001813 fs_info->sectorsize);
Miao Xie376cc682013-12-10 19:25:04 +08001814
Goldwyn Rodriguesaa8c1a42020-10-14 09:55:45 -05001815 ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1816 dirty_pages, pos, copied,
1817 &cached_state, only_release_metadata);
Filipe Mananac67d9702019-09-30 10:20:25 +01001818
1819 /*
1820 * If we have not locked the extent range, because the range's
1821 * start offset is >= i_size, we might still have a non-NULL
1822 * cached extent state, acquired while marking the extent range
1823 * as delalloc through btrfs_dirty_pages(). Therefore free any
1824 * possible cached extent state to avoid a memory leak.
1825 */
Goldwyn Rodrigues79f015f2017-10-16 05:43:21 -05001826 if (extents_locked)
Miao Xie376cc682013-12-10 19:25:04 +08001827 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
David Sterbae43bbe52017-12-12 21:43:52 +01001828 lockstart, lockend, &cached_state);
Filipe Mananac67d9702019-09-30 10:20:25 +01001829 else
1830 free_extent_state(cached_state);
1831
Qu Wenruo8702ba92019-10-14 14:34:51 +08001832 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
Miao Xief1de9682014-01-09 10:06:10 +08001833 if (ret) {
1834 btrfs_drop_pages(pages, num_pages);
Miao Xie376cc682013-12-10 19:25:04 +08001835 break;
Miao Xief1de9682014-01-09 10:06:10 +08001836 }
Chris Mason39279cc2007-06-12 06:35:45 -04001837
Josef Bacik7ee9e442013-06-21 16:37:03 -04001838 release_bytes = 0;
Miao Xie8257b2d2014-03-06 13:38:19 +08001839 if (only_release_metadata)
Qu Wenruo38d37aa2020-06-24 07:23:52 +08001840 btrfs_check_nocow_unlock(BTRFS_I(inode));
Miao Xie8257b2d2014-03-06 13:38:19 +08001841
Miao Xief1de9682014-01-09 10:06:10 +08001842 btrfs_drop_pages(pages, num_pages);
1843
Josef Bacikd0215f32011-01-25 14:57:24 -05001844 cond_resched();
1845
Namjae Jeond0e1d662012-12-11 16:00:21 -08001846 balance_dirty_pages_ratelimited(inode->i_mapping);
Chris Mason39279cc2007-06-12 06:35:45 -04001847
Xin Zhong914ee292010-12-09 09:30:14 +00001848 pos += copied;
1849 num_written += copied;
Chris Mason39279cc2007-06-12 06:35:45 -04001850 }
Chris Mason5b92ee72008-01-03 13:46:11 -05001851
Chris Mason8c2383c2007-06-18 09:57:58 -04001852 kfree(pages);
Josef Bacikd0215f32011-01-25 14:57:24 -05001853
Josef Bacik7ee9e442013-06-21 16:37:03 -04001854 if (release_bytes) {
Miao Xie8257b2d2014-03-06 13:38:19 +08001855 if (only_release_metadata) {
Qu Wenruo38d37aa2020-06-24 07:23:52 +08001856 btrfs_check_nocow_unlock(BTRFS_I(inode));
Nikolay Borisov691fa052017-02-20 13:50:42 +02001857 btrfs_delalloc_release_metadata(BTRFS_I(inode),
Qu Wenruo43b18592017-12-12 15:34:32 +08001858 release_bytes, true);
Miao Xie8257b2d2014-03-06 13:38:19 +08001859 } else {
Nikolay Borisov86d52922020-06-03 08:55:40 +03001860 btrfs_delalloc_release_space(BTRFS_I(inode),
1861 data_reserved,
Qu Wenruobc42bda2017-02-27 15:10:39 +08001862 round_down(pos, fs_info->sectorsize),
Qu Wenruo43b18592017-12-12 15:34:32 +08001863 release_bytes, true);
Miao Xie8257b2d2014-03-06 13:38:19 +08001864 }
Josef Bacik7ee9e442013-06-21 16:37:03 -04001865 }
1866
Qu Wenruo364ecf32017-02-27 15:10:38 +08001867 extent_changeset_free(data_reserved);
Goldwyn Rodrigues5e8b9ef2020-09-24 11:39:13 -05001868 if (num_written > 0) {
1869 pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1870 iocb->ki_pos += num_written;
1871 }
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001872out:
1873 btrfs_inode_unlock(inode, ilock_flags);
Josef Bacikd0215f32011-01-25 14:57:24 -05001874 return num_written ? num_written : ret;
1875}
1876
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05001877static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1878 const struct iov_iter *iter, loff_t offset)
1879{
1880 const u32 blocksize_mask = fs_info->sectorsize - 1;
1881
1882 if (offset & blocksize_mask)
1883 return -EINVAL;
1884
1885 if (iov_iter_alignment(iter) & blocksize_mask)
1886 return -EINVAL;
1887
1888 return 0;
1889}
1890
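/*
 * For example, with a 4096 byte sector size a direct write at file
 * offset 8192 from a 4096-byte-aligned buffer passes both checks above,
 * while one at offset 512 makes btrfs_direct_write() below fall back to
 * the buffered path.
 */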
1891static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
Josef Bacikd0215f32011-01-25 14:57:24 -05001892{
1893 struct file *file = iocb->ki_filp;
Filipe Manana728404d2014-10-10 09:43:11 +01001894 struct inode *inode = file_inode(file);
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05001895 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001896 loff_t pos;
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05001897 ssize_t written = 0;
Josef Bacikd0215f32011-01-25 14:57:24 -05001898 ssize_t written_buffered;
1899 loff_t endbyte;
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001900 ssize_t err;
1901 unsigned int ilock_flags = 0;
Goldwyn Rodriguesa42fa642020-09-24 11:39:20 -05001902 struct iomap_dio *dio = NULL;
Josef Bacikd0215f32011-01-25 14:57:24 -05001903
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001904 if (iocb->ki_flags & IOCB_NOWAIT)
1905 ilock_flags |= BTRFS_ILOCK_TRY;
1906
Goldwyn Rodriguese9adabb2020-09-24 11:39:18 -05001907	/* If the DIO write is within EOF, use a shared lock */
1908 if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
1909 ilock_flags |= BTRFS_ILOCK_SHARED;
1910
1911relock:
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001912 err = btrfs_inode_lock(inode, ilock_flags);
1913 if (err < 0)
1914 return err;
1915
1916 err = generic_write_checks(iocb, from);
1917 if (err <= 0) {
1918 btrfs_inode_unlock(inode, ilock_flags);
1919 return err;
1920 }
1921
1922 err = btrfs_write_check(iocb, from, err);
1923 if (err < 0) {
1924 btrfs_inode_unlock(inode, ilock_flags);
1925 goto out;
1926 }
1927
1928 pos = iocb->ki_pos;
Goldwyn Rodriguese9adabb2020-09-24 11:39:18 -05001929 /*
1930 * Re-check since file size may have changed just before taking the
1931	 * lock, or pos may have changed because of O_APPEND in generic_write_checks()
1932 */
1933 if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1934 pos + iov_iter_count(from) > i_size_read(inode)) {
1935 btrfs_inode_unlock(inode, ilock_flags);
1936 ilock_flags &= ~BTRFS_ILOCK_SHARED;
1937 goto relock;
1938 }
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001939
1940 if (check_direct_IO(fs_info, from, pos)) {
1941 btrfs_inode_unlock(inode, ilock_flags);
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05001942 goto buffered;
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001943 }
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05001944
Christoph Hellwig2f632962021-01-23 10:06:09 -08001945 dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
1946 0);
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05001947
Goldwyn Rodriguese9adabb2020-09-24 11:39:18 -05001948 btrfs_inode_unlock(inode, ilock_flags);
Josef Bacikd0215f32011-01-25 14:57:24 -05001949
Goldwyn Rodriguesa42fa642020-09-24 11:39:20 -05001950 if (IS_ERR_OR_NULL(dio)) {
1951 err = PTR_ERR_OR_ZERO(dio);
1952 if (err < 0 && err != -ENOTBLK)
1953 goto out;
1954 } else {
1955 written = iomap_dio_complete(dio);
1956 }
1957
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05001958 if (written < 0 || !iov_iter_count(from)) {
1959 err = written;
1960 goto out;
1961 }
Josef Bacikd0215f32011-01-25 14:57:24 -05001962
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05001963buffered:
Goldwyn Rodriguese4af4002018-06-17 12:39:47 -05001964 pos = iocb->ki_pos;
1965 written_buffered = btrfs_buffered_write(iocb, from);
Josef Bacikd0215f32011-01-25 14:57:24 -05001966 if (written_buffered < 0) {
1967 err = written_buffered;
1968 goto out;
1969 }
Filipe Manana075bdbd2014-10-09 21:18:55 +01001970 /*
1971 * Ensure all data is persisted. We want the next direct IO read to be
1972 * able to read what was just written.
1973 */
Josef Bacikd0215f32011-01-25 14:57:24 -05001974 endbyte = pos + written_buffered - 1;
Filipe Manana728404d2014-10-10 09:43:11 +01001975 err = btrfs_fdatawrite_range(inode, pos, endbyte);
Filipe Manana075bdbd2014-10-09 21:18:55 +01001976 if (err)
1977 goto out;
Filipe Manana728404d2014-10-10 09:43:11 +01001978 err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
Josef Bacikd0215f32011-01-25 14:57:24 -05001979 if (err)
1980 goto out;
1981 written += written_buffered;
Al Viro867c4f92014-02-11 19:31:06 -05001982 iocb->ki_pos = pos + written_buffered;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001983 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1984 endbyte >> PAGE_SHIFT);
Josef Bacikd0215f32011-01-25 14:57:24 -05001985out:
1986 return written ? written : err;
1987}
1988
Al Virob30ac0f2014-04-03 14:29:04 -04001989static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1990 struct iov_iter *from)
Josef Bacikd0215f32011-01-25 14:57:24 -05001991{
1992 struct file *file = iocb->ki_filp;
Nikolay Borisov14971652020-12-10 10:38:32 +02001993 struct btrfs_inode *inode = BTRFS_I(file_inode(file));
Josef Bacikd0215f32011-01-25 14:57:24 -05001994 ssize_t num_written = 0;
Omar Sandovalf50cb7a2019-08-15 14:04:03 -07001995 const bool sync = iocb->ki_flags & IOCB_DSYNC;
Josef Bacikd0215f32011-01-25 14:57:24 -05001996
Goldwyn Rodriguesc86537a2020-09-24 11:39:14 -05001997 /*
1998	 * If the fs flips readonly due to some unexpected error, then even
1999	 * though we have the file open for writing we must stop this write
2000 * to ensure consistency.
2001 */
Nikolay Borisov14971652020-12-10 10:38:32 +02002002 if (test_bit(BTRFS_FS_STATE_ERROR, &inode->root->fs_info->fs_state))
Goldwyn Rodriguesc86537a2020-09-24 11:39:14 -05002003 return -EROFS;
2004
Christoph Hellwig91f99432017-08-29 16:13:20 +02002005 if (!(iocb->ki_flags & IOCB_DIRECT) &&
2006 (iocb->ki_flags & IOCB_NOWAIT))
2007 return -EOPNOTSUPP;
2008
Josef Bacikb812ce22012-11-16 13:56:32 -05002009 if (sync)
Nikolay Borisov14971652020-12-10 10:38:32 +02002010 atomic_inc(&inode->sync_writers);
Josef Bacikb812ce22012-11-16 13:56:32 -05002011
Goldwyn Rodriguesecfdc082020-09-24 11:39:21 -05002012 if (iocb->ki_flags & IOCB_DIRECT)
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05002013 num_written = btrfs_direct_write(iocb, from);
Goldwyn Rodriguesecfdc082020-09-24 11:39:21 -05002014 else
Goldwyn Rodriguese4af4002018-06-17 12:39:47 -05002015 num_written = btrfs_buffered_write(iocb, from);
Josef Bacikd0215f32011-01-25 14:57:24 -05002016
Filipe Mananabc0939f2021-02-23 12:08:48 +00002017 btrfs_set_inode_last_sub_trans(inode);
2018
Christoph Hellwige2592212016-04-07 08:52:01 -07002019 if (num_written > 0)
2020 num_written = generic_write_sync(iocb, num_written);
Miao Xie0a3404d2013-01-28 12:34:55 +00002021
Josef Bacikb812ce22012-11-16 13:56:32 -05002022 if (sync)
Nikolay Borisov14971652020-12-10 10:38:32 +02002023 atomic_dec(&inode->sync_writers);
Goldwyn Rodriguesb8d8e1f2020-09-24 11:39:15 -05002024
Chris Mason39279cc2007-06-12 06:35:45 -04002025 current->backing_dev_info = NULL;
Goldwyn Rodriguesc3523702020-09-24 11:39:17 -05002026 return num_written;
Chris Mason39279cc2007-06-12 06:35:45 -04002027}
2028
Chris Masond3977122009-01-05 21:25:51 -05002029int btrfs_release_file(struct inode *inode, struct file *filp)
Mingminge1b81e62008-05-27 10:55:43 -04002030{
Josef Bacik23b5ec72017-07-24 15:14:25 -04002031 struct btrfs_file_private *private = filp->private_data;
2032
Josef Bacik23b5ec72017-07-24 15:14:25 -04002033 if (private && private->filldir_buf)
2034 kfree(private->filldir_buf);
2035 kfree(private);
2036 filp->private_data = NULL;
2037
Chris Masonf6dc45c2014-08-20 07:15:33 -07002038 /*
Nikolay Borisov1fd40332020-10-01 09:40:39 +03002039 * Set by setattr when we are about to truncate a file from a non-zero
2040 * size to a zero size. This tries to flush down new bytes that may
2041 * have been written if the application were using truncate to replace
2042 * a file in place.
Chris Masonf6dc45c2014-08-20 07:15:33 -07002043 */
Nikolay Borisov1fd40332020-10-01 09:40:39 +03002044 if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
Chris Masonf6dc45c2014-08-20 07:15:33 -07002045 &BTRFS_I(inode)->runtime_flags))
2046 filemap_flush(inode->i_mapping);
Mingminge1b81e62008-05-27 10:55:43 -04002047 return 0;
2048}
2049
Filipe Manana669249e2014-09-02 11:09:58 +01002050static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2051{
2052 int ret;
Liu Bo343e4fc2017-11-15 16:10:28 -07002053 struct blk_plug plug;
Filipe Manana669249e2014-09-02 11:09:58 +01002054
Liu Bo343e4fc2017-11-15 16:10:28 -07002055 /*
2056 * This is only called in fsync, which would do synchronous writes, so
2057	 * a plug can merge adjacent IOs as much as possible. Especially with
2058	 * multiple disks in a RAID profile, a large IO can be split into
2059	 * several segments of stripe length (currently 64K).
2060 */
2061 blk_start_plug(&plug);
Filipe Manana669249e2014-09-02 11:09:58 +01002062 atomic_inc(&BTRFS_I(inode)->sync_writers);
Filipe Manana728404d2014-10-10 09:43:11 +01002063 ret = btrfs_fdatawrite_range(inode, start, end);
Filipe Manana669249e2014-09-02 11:09:58 +01002064 atomic_dec(&BTRFS_I(inode)->sync_writers);
Liu Bo343e4fc2017-11-15 16:10:28 -07002065 blk_finish_plug(&plug);
Filipe Manana669249e2014-09-02 11:09:58 +01002066
2067 return ret;
2068}
2069
Chris Masond352ac62008-09-29 15:18:18 -04002070/*
2071 * fsync call for both files and directories. This logs the inode into
2072 * the tree log instead of forcing full commits whenever possible.
2073 *
2074 * It needs to call filemap_fdatawait so that all ordered extent updates
2075 * in the metadata btree are up to date for copying to the log.
2076 *
2077 * It drops the inode mutex before doing the tree log commit. This is an
2078 * important optimization for directories because holding the mutex prevents
2079 * new operations on the dir while we write to disk.
2080 */
Josef Bacik02c24a82011-07-16 20:44:56 -04002081int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
Chris Mason39279cc2007-06-12 06:35:45 -04002082{
Filipe Mananade17e792016-03-30 19:03:13 -04002083 struct dentry *dentry = file_dentry(file);
David Howells2b0143b2015-03-17 22:25:59 +00002084 struct inode *inode = d_inode(dentry);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002085 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Chris Mason39279cc2007-06-12 06:35:45 -04002086 struct btrfs_root *root = BTRFS_I(inode)->root;
Chris Mason39279cc2007-06-12 06:35:45 -04002087 struct btrfs_trans_handle *trans;
Miao Xie8b050d32014-02-20 18:08:58 +08002088 struct btrfs_log_ctx ctx;
Jeff Layton333427a2017-07-06 07:02:31 -04002089 int ret = 0, err;
Filipe Manana48778172020-08-11 12:43:58 +01002090 u64 len;
2091 bool full_sync;
Chris Mason39279cc2007-06-12 06:35:45 -04002092
liubo1abe9b82011-03-24 11:18:59 +00002093 trace_btrfs_sync_file(file, datasync);
Chris Mason257c62e2009-10-13 13:21:08 -04002094
Liu Boebb70442017-11-21 14:35:40 -07002095 btrfs_init_log_ctx(&ctx, inode);
2096
Miao Xie90abccf2012-09-13 04:53:47 -06002097 /*
Filipe Manana48778172020-08-11 12:43:58 +01002098 * Always set the range to a full range, otherwise we can get into
2099 * several problems, from missing file extent items to represent holes
2100 * when not using the NO_HOLES feature, to log tree corruption due to
2101 * races between hole detection during logging and completion of ordered
2102 * extents outside the range, to missing checksums due to ordered extents
2103 * for which we flushed only a subset of their pages.
Filipe Manana95418ed2020-03-09 12:41:05 +00002104 */
Filipe Manana48778172020-08-11 12:43:58 +01002105 start = 0;
2106 end = LLONG_MAX;
2107 len = (u64)LLONG_MAX + 1;
Filipe Manana95418ed2020-03-09 12:41:05 +00002108
2109 /*
Miao Xie90abccf2012-09-13 04:53:47 -06002110	 * We write the dirty pages in the range and wait until they complete
2111	 * outside of the ->i_mutex, which lets multiple tasks flush dirty
Josef Bacik2ab28f32012-10-12 15:27:49 -04002112	 * pages concurrently and improves performance. See
2113	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
Miao Xie90abccf2012-09-13 04:53:47 -06002114 */
Filipe Manana669249e2014-09-02 11:09:58 +01002115 ret = start_ordered_ops(inode, start, end);
Miao Xie90abccf2012-09-13 04:53:47 -06002116 if (ret)
Jeff Layton333427a2017-07-06 07:02:31 -04002117 goto out;
Miao Xie90abccf2012-09-13 04:53:47 -06002118
Filipe Manana885f46d2021-02-23 12:08:47 +00002119 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
Josef Bacikc4951442018-10-12 15:32:32 -04002120
Miao Xie2ecb7922012-09-06 04:04:27 -06002121 atomic_inc(&root->log_batch);
Josef Bacikb5e6c3e2018-05-23 11:58:33 -04002122
Filipe Manana669249e2014-09-02 11:09:58 +01002123 /*
Filipe Manana48778172020-08-11 12:43:58 +01002124 * Always check for the full sync flag while holding the inode's lock,
2125	 * to avoid races with other tasks. The flag must stay either set or
2126	 * clear for the whole time we are logging; it must not change mid-way.
Filipe Manana7af59742020-04-07 11:37:44 +01002127 */
Filipe Manana48778172020-08-11 12:43:58 +01002128 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2129 &BTRFS_I(inode)->runtime_flags);
Filipe Manana7af59742020-04-07 11:37:44 +01002130
2131 /*
Filipe Manana885f46d2021-02-23 12:08:47 +00002132 * Before we acquired the inode's lock and the mmap lock, someone may
2133 * have dirtied more pages in the target range. We need to make sure
2134 * that writeback for any such pages does not start while we are logging
2135 * the inode, because if it does, any of the following might happen when
2136 * we are not doing a full inode sync:
Filipe Mananaaab15e82018-11-12 10:23:58 +00002137 *
2138 * 1) We log an extent after its writeback finishes but before its
2139 * checksums are added to the csum tree, leading to -EIO errors
2140 * when attempting to read the extent after a log replay.
2141 *
2142 * 2) We can end up logging an extent before its writeback finishes.
2143 * Therefore after the log replay we will have a file extent item
2144 * pointing to an unwritten extent (and no data checksums as well).
2145 *
2146 * So trigger writeback for any eventual new dirty pages and then we
2147 * wait for all ordered extents to complete below.
2148 */
2149 ret = start_ordered_ops(inode, start, end);
2150 if (ret) {
Filipe Manana885f46d2021-02-23 12:08:47 +00002151 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
Filipe Mananaaab15e82018-11-12 10:23:58 +00002152 goto out;
2153 }
2154
2155 /*
Josef Bacikb5e6c3e2018-05-23 11:58:33 -04002156 * We have to do this here to avoid the priority inversion of waiting on
Andrea Gelmini52042d82018-11-28 12:05:13 +01002157 * IO of a lower priority task while holding a transaction open.
Filipe Mananaba0b0842019-10-16 16:28:52 +01002158 *
Filipe Manana48778172020-08-11 12:43:58 +01002159 * For a full fsync we wait for the ordered extents to complete while
2160 * for a fast fsync we wait just for writeback to complete, and then
2161 * attach the ordered extents to the transaction so that a transaction
2162 * commit waits for their completion, to avoid data loss if we fsync,
2163	 * the current transaction commits before the ordered extents complete,
2164 * and a power failure happens right after that.
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09002165 *
2166 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
2167 * logical address recorded in the ordered extent may change. We need
2168 * to wait for the IO to stabilize the logical address.
Filipe Manana669249e2014-09-02 11:09:58 +01002169 */
Naohiro Aotad8e3fb12021-02-04 19:22:05 +09002170 if (full_sync || btrfs_is_zoned(fs_info)) {
Filipe Manana48778172020-08-11 12:43:58 +01002171 ret = btrfs_wait_ordered_range(inode, start, len);
2172 } else {
2173 /*
2174 * Get our ordered extents as soon as possible to avoid doing
2175 * checksum lookups in the csum tree, and use instead the
2176 * checksums attached to the ordered extents.
2177 */
2178 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2179 &ctx.ordered_extents);
2180 ret = filemap_fdatawait_range(inode->i_mapping, start, end);
Josef Bacik0ef8b722013-10-25 16:13:35 -04002181 }
Filipe Manana48778172020-08-11 12:43:58 +01002182
2183 if (ret)
2184 goto out_release_extents;
2185
Miao Xie2ecb7922012-09-06 04:04:27 -06002186 atomic_inc(&root->log_batch);
Chris Mason257c62e2009-10-13 13:21:08 -04002187
Filipe Manana48778172020-08-11 12:43:58 +01002188 /*
2189	 * If we are doing a fast fsync we cannot bail out if the inode's
2190	 * last_trans is <= the last committed transaction, because we only
2191 * update the last_trans of the inode during ordered extent completion,
2192 * and for a fast fsync we don't wait for that, we only wait for the
2193 * writeback to complete.
2194 */
Josef Bacika4abeea2011-04-11 17:25:13 -04002195 smp_mb();
Nikolay Borisov0f8939b2017-01-18 00:31:30 +02002196 if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
Filipe Manana48778172020-08-11 12:43:58 +01002197 (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
2198 (full_sync || list_empty(&ctx.ordered_extents)))) {
Josef Bacik5dc562c2012-08-17 13:14:17 -04002199 /*
Nicholas D Steeves01327612016-05-19 21:18:45 -04002200 * We've had everything committed since the last time we were
Josef Bacik5dc562c2012-08-17 13:14:17 -04002201	 * modified, so clear this flag in case it was set for whatever
2202	 * reason; it's no longer relevant.
2203 */
2204 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2205 &BTRFS_I(inode)->runtime_flags);
Filipe Manana0596a902016-06-14 14:18:27 +01002206 /*
2207 * An ordered extent might have started before and completed
2208 * already with io errors, in which case the inode was not
2209 * updated and we end up here. So check the inode's mapping
Jeff Layton333427a2017-07-06 07:02:31 -04002210	 * for any errors that might have happened since the last
2211	 * time fsync was called.
Filipe Manana0596a902016-06-14 14:18:27 +01002212 */
Jeff Layton333427a2017-07-06 07:02:31 -04002213 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
Filipe Manana48778172020-08-11 12:43:58 +01002214 goto out_release_extents;
Josef Bacik15ee9bc2007-08-10 16:22:09 -04002215 }
Josef Bacik15ee9bc2007-08-10 16:22:09 -04002216
2217 /*
Josef Bacik5039edd2014-01-15 13:34:13 -05002218 * We use start here because we will need to wait on the IO to complete
2219 * in btrfs_sync_log, which could require joining a transaction (for
2220 * example checking cross references in the nocow path). If we use join
2221 * here we could get into a situation where we're waiting on IO to
2222 * happen that is blocked on a transaction trying to commit. With start
2223 * we inc the extwriter counter, so we wait for all extwriters to exit
Andrea Gelmini52042d82018-11-28 12:05:13 +01002224 * before we start blocking joiners. This comment is to keep somebody
Josef Bacik5039edd2014-01-15 13:34:13 -05002225 * from thinking they are super smart and changing this to
2226 * btrfs_join_transaction *cough*Josef*cough*.
2227 */
Yan, Zhenga22285a2010-05-16 10:48:46 -04002228 trans = btrfs_start_transaction(root, 0);
2229 if (IS_ERR(trans)) {
2230 ret = PTR_ERR(trans);
Filipe Manana48778172020-08-11 12:43:58 +01002231 goto out_release_extents;
Chris Mason39279cc2007-06-12 06:35:45 -04002232 }
Filipe Mananad0c2f4f2021-01-27 10:35:00 +00002233 trans->in_fsync = true;
Chris Masone02119d2008-09-05 16:13:11 -04002234
Filipe Manana48778172020-08-11 12:43:58 +01002235 ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
2236 btrfs_release_log_ctx_extents(&ctx);
Josef Bacik02c24a82011-07-16 20:44:56 -04002237 if (ret < 0) {
Filipe David Borba Mananaa0634be2013-09-11 20:36:44 +01002238 /* Fallthrough and commit/free transaction. */
2239 ret = 1;
Josef Bacik02c24a82011-07-16 20:44:56 -04002240 }
Chris Mason49eb7e42008-09-11 15:53:12 -04002241
2242 /* we've logged all the items and now have a consistent
2243 * version of the file in the log. It is possible that
2244 * someone will come in and modify the file, but that's
2245 * fine because the log is consistent on disk, and we
2246 * have references to all of the file's extents
2247 *
2248 * It is possible that someone will come in and log the
2249 * file again, but that will end up using the synchronization
2250 * inside btrfs_sync_log to keep things safe.
2251 */
Filipe Manana885f46d2021-02-23 12:08:47 +00002252 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
Chris Mason49eb7e42008-09-11 15:53:12 -04002253
Chris Mason257c62e2009-10-13 13:21:08 -04002254 if (ret != BTRFS_NO_LOG_SYNC) {
Josef Bacik0ef8b722013-10-25 16:13:35 -04002255 if (!ret) {
Miao Xie8b050d32014-02-20 18:08:58 +08002256 ret = btrfs_sync_log(trans, root, &ctx);
Josef Bacik0ef8b722013-10-25 16:13:35 -04002257 if (!ret) {
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04002258 ret = btrfs_end_transaction(trans);
Josef Bacik0ef8b722013-10-25 16:13:35 -04002259 goto out;
Josef Bacik2ab28f32012-10-12 15:27:49 -04002260 }
Chris Mason257c62e2009-10-13 13:21:08 -04002261 }
Filipe Manana48778172020-08-11 12:43:58 +01002262 if (!full_sync) {
2263 ret = btrfs_wait_ordered_range(inode, start, len);
2264 if (ret) {
2265 btrfs_end_transaction(trans);
2266 goto out;
2267 }
2268 }
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04002269 ret = btrfs_commit_transaction(trans);
Chris Mason257c62e2009-10-13 13:21:08 -04002270 } else {
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04002271 ret = btrfs_end_transaction(trans);
Chris Masone02119d2008-09-05 16:13:11 -04002272 }
Chris Mason39279cc2007-06-12 06:35:45 -04002273out:
Liu Boebb70442017-11-21 14:35:40 -07002274 ASSERT(list_empty(&ctx.list));
Jeff Layton333427a2017-07-06 07:02:31 -04002275 err = file_check_and_advance_wb_err(file);
2276 if (!ret)
2277 ret = err;
Roel Kluin014e4ac2010-01-29 10:42:11 +00002278 return ret > 0 ? -EIO : ret;
Filipe Manana48778172020-08-11 12:43:58 +01002279
2280out_release_extents:
2281 btrfs_release_log_ctx_extents(&ctx);
Filipe Manana885f46d2021-02-23 12:08:47 +00002282 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
Filipe Manana48778172020-08-11 12:43:58 +01002283 goto out;
Chris Mason39279cc2007-06-12 06:35:45 -04002284}
2285
Alexey Dobriyanf0f37e2f2009-09-27 22:29:37 +04002286static const struct vm_operations_struct btrfs_file_vm_ops = {
Chris Mason92fee662007-07-25 12:31:35 -04002287 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07002288 .map_pages = filemap_map_pages,
Chris Mason9ebefb182007-06-15 13:50:00 -04002289 .page_mkwrite = btrfs_page_mkwrite,
2290};
2291
2292static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2293{
Miao Xie058a4572010-05-20 07:21:50 +00002294 struct address_space *mapping = filp->f_mapping;
2295
2296 if (!mapping->a_ops->readpage)
2297 return -ENOEXEC;
2298
Chris Mason9ebefb182007-06-15 13:50:00 -04002299 file_accessed(filp);
Miao Xie058a4572010-05-20 07:21:50 +00002300 vma->vm_ops = &btrfs_file_vm_ops;
Miao Xie058a4572010-05-20 07:21:50 +00002301
Chris Mason9ebefb182007-06-15 13:50:00 -04002302 return 0;
2303}
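/*
 * Note: mmap relies on ->readpage to populate pages on fault, so address
 * spaces without a readpage operation are rejected with -ENOEXEC above
 * rather than allowing a mapping that could never be faulted in.
 */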
2304
Nikolay Borisov35339c22017-02-20 13:50:46 +02002305static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
Josef Bacik2aaa6652012-08-29 14:27:18 -04002306 int slot, u64 start, u64 end)
2307{
2308 struct btrfs_file_extent_item *fi;
2309 struct btrfs_key key;
2310
2311 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2312 return 0;
2313
2314 btrfs_item_key_to_cpu(leaf, &key, slot);
Nikolay Borisov35339c22017-02-20 13:50:46 +02002315 if (key.objectid != btrfs_ino(inode) ||
Josef Bacik2aaa6652012-08-29 14:27:18 -04002316 key.type != BTRFS_EXTENT_DATA_KEY)
2317 return 0;
2318
2319 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2320
2321 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2322 return 0;
2323
2324 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2325 return 0;
2326
2327 if (key.offset == end)
2328 return 1;
2329 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2330 return 1;
2331 return 0;
2332}
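/*
 * Illustrative example: if the leaf holds a hole extent item (disk_bytenr
 * == 0) at key.offset == 4096 covering 8192 bytes, hole_mergeable()
 * returns 1 both for a new hole ending at 4096 (key.offset == end) and
 * for one starting at 12288 (key.offset + num_bytes == start), letting
 * fill_holes() grow the existing item instead of inserting a second one.
 */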
2333
Nikolay Borisova012a742017-02-20 13:50:47 +02002334static int fill_holes(struct btrfs_trans_handle *trans,
2335 struct btrfs_inode *inode,
2336 struct btrfs_path *path, u64 offset, u64 end)
Josef Bacik2aaa6652012-08-29 14:27:18 -04002337{
David Sterba3ffbd682018-06-29 10:56:42 +02002338 struct btrfs_fs_info *fs_info = trans->fs_info;
Nikolay Borisova012a742017-02-20 13:50:47 +02002339 struct btrfs_root *root = inode->root;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002340 struct extent_buffer *leaf;
2341 struct btrfs_file_extent_item *fi;
2342 struct extent_map *hole_em;
Nikolay Borisova012a742017-02-20 13:50:47 +02002343 struct extent_map_tree *em_tree = &inode->extent_tree;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002344 struct btrfs_key key;
2345 int ret;
2346
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002347 if (btrfs_fs_incompat(fs_info, NO_HOLES))
Josef Bacik16e75492013-10-22 12:18:51 -04002348 goto out;
2349
Nikolay Borisova012a742017-02-20 13:50:47 +02002350 key.objectid = btrfs_ino(inode);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002351 key.type = BTRFS_EXTENT_DATA_KEY;
2352 key.offset = offset;
2353
Josef Bacik2aaa6652012-08-29 14:27:18 -04002354 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
Josef Bacikf94480b2016-11-14 14:06:22 -05002355 if (ret <= 0) {
2356 /*
2357 * We should have dropped this offset, so if we find it then
2358 * something has gone horribly wrong.
2359 */
2360 if (ret == 0)
2361 ret = -EINVAL;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002362 return ret;
Josef Bacikf94480b2016-11-14 14:06:22 -05002363 }
Josef Bacik2aaa6652012-08-29 14:27:18 -04002364
2365 leaf = path->nodes[0];
Nikolay Borisova012a742017-02-20 13:50:47 +02002366 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
Josef Bacik2aaa6652012-08-29 14:27:18 -04002367 u64 num_bytes;
2368
2369 path->slots[0]--;
2370 fi = btrfs_item_ptr(leaf, path->slots[0],
2371 struct btrfs_file_extent_item);
2372 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2373 end - offset;
2374 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2375 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2376 btrfs_set_file_extent_offset(leaf, fi, 0);
2377 btrfs_mark_buffer_dirty(leaf);
2378 goto out;
2379 }
2380
chandan1707e262014-07-01 12:04:28 +05302381 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
Josef Bacik2aaa6652012-08-29 14:27:18 -04002382 u64 num_bytes;
2383
Josef Bacik2aaa6652012-08-29 14:27:18 -04002384 key.offset = offset;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002385 btrfs_set_item_key_safe(fs_info, path, &key);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002386 fi = btrfs_item_ptr(leaf, path->slots[0],
2387 struct btrfs_file_extent_item);
2388 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2389 offset;
2390 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2391 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2392 btrfs_set_file_extent_offset(leaf, fi, 0);
2393 btrfs_mark_buffer_dirty(leaf);
2394 goto out;
2395 }
2396 btrfs_release_path(path);
2397
Nikolay Borisova012a742017-02-20 13:50:47 +02002398 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
David Sterbaf85b7372017-01-20 14:54:07 +01002399 offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002400 if (ret)
2401 return ret;
2402
2403out:
2404 btrfs_release_path(path);
2405
2406 hole_em = alloc_extent_map();
2407 if (!hole_em) {
2408 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
Nikolay Borisova012a742017-02-20 13:50:47 +02002409 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002410 } else {
2411 hole_em->start = offset;
2412 hole_em->len = end - offset;
Josef Bacikcc95bef2013-04-04 14:31:27 -04002413 hole_em->ram_bytes = hole_em->len;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002414 hole_em->orig_start = offset;
2415
2416 hole_em->block_start = EXTENT_MAP_HOLE;
2417 hole_em->block_len = 0;
Josef Bacikb4939682012-12-03 10:31:19 -05002418 hole_em->orig_block_len = 0;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002419 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2420 hole_em->generation = trans->transid;
2421
2422 do {
2423 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2424 write_lock(&em_tree->lock);
Josef Bacik09a2a8f92013-04-05 16:51:15 -04002425 ret = add_extent_mapping(em_tree, hole_em, 1);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002426 write_unlock(&em_tree->lock);
2427 } while (ret == -EEXIST);
2428 free_extent_map(hole_em);
2429 if (ret)
2430 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
Nikolay Borisova012a742017-02-20 13:50:47 +02002431 &inode->runtime_flags);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002432 }
2433
2434 return 0;
2435}
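/*
 * Note: with the NO_HOLES incompat feature no file extent item is inserted
 * at all (the absence of an item implicitly represents the hole), but the
 * code above still rebuilds an extent map for the range, stamped with the
 * current transid, so the fast fsync path can see the hole without having
 * to search the subvolume tree.
 */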
2436
Qu Wenruod7781542014-05-30 15:16:10 +08002437/*
 2438 * Find a hole extent on the given inode and change start/len to the end of
 2439 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
 2440 * em->start + em->len > start).
 2441 * When a hole extent is found, return 1 and modify start/len.
2442 */
Nikolay Borisovdea46d82020-11-02 16:49:01 +02002443static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
Qu Wenruod7781542014-05-30 15:16:10 +08002444{
Nikolay Borisovdea46d82020-11-02 16:49:01 +02002445 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Qu Wenruod7781542014-05-30 15:16:10 +08002446 struct extent_map *em;
2447 int ret = 0;
2448
Nikolay Borisovdea46d82020-11-02 16:49:01 +02002449 em = btrfs_get_extent(inode, NULL, 0,
Filipe Manana609805d2017-05-30 05:29:09 +01002450 round_down(*start, fs_info->sectorsize),
Omar Sandoval39b07b52019-12-02 17:34:23 -08002451 round_up(*len, fs_info->sectorsize));
Dan Carpenter99862772017-04-11 11:57:15 +03002452 if (IS_ERR(em))
2453 return PTR_ERR(em);
Qu Wenruod7781542014-05-30 15:16:10 +08002454
 2455 /* Hole or vacuum extent (the latter only exists in no-holes mode) */
2456 if (em->block_start == EXTENT_MAP_HOLE) {
2457 ret = 1;
2458 *len = em->start + em->len > *start + *len ?
2459 0 : *start + *len - em->start - em->len;
2460 *start = em->start + em->len;
2461 }
2462 free_extent_map(em);
2463 return ret;
2464}
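/*
 * Usage sketch (illustrative): callers pass start/len by pointer and a
 * return of 1 means the front of the range was a hole that got skipped.
 * For example, punching [0, 64K) over a hole at [0, 16K):
 *
 *	u64 start = 0, len = 65536;
 *	ret = find_first_non_hole(inode, &start, &len);
 *	// ret == 1, start == 16384, len == 49152
 */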
2465
Filipe Mananaf27451f2017-10-25 11:55:28 +01002466static int btrfs_punch_hole_lock_range(struct inode *inode,
2467 const u64 lockstart,
2468 const u64 lockend,
2469 struct extent_state **cached_state)
2470{
2471 while (1) {
2472 struct btrfs_ordered_extent *ordered;
2473 int ret;
2474
2475 truncate_pagecache_range(inode, lockstart, lockend);
2476
2477 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2478 cached_state);
Nikolay Borisov6d072c82020-08-31 14:42:39 +03002479 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
2480 lockend);
Filipe Mananaf27451f2017-10-25 11:55:28 +01002481
2482 /*
 2483 * We need to make sure we have no ordered extents in this range
 2484 * and that nobody raced in and read a page in this range; if
 2485 * either happened, we need to try again.
2486 */
2487 if ((!ordered ||
Omar Sandovalbffe6332019-12-02 17:34:19 -08002488 (ordered->file_offset + ordered->num_bytes <= lockstart ||
Filipe Mananaf27451f2017-10-25 11:55:28 +01002489 ordered->file_offset > lockend)) &&
David Sterba051c98e2018-03-07 15:33:22 +01002490 !filemap_range_has_page(inode->i_mapping,
2491 lockstart, lockend)) {
Filipe Mananaf27451f2017-10-25 11:55:28 +01002492 if (ordered)
2493 btrfs_put_ordered_extent(ordered);
2494 break;
2495 }
2496 if (ordered)
2497 btrfs_put_ordered_extent(ordered);
2498 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2499 lockend, cached_state);
2500 ret = btrfs_wait_ordered_range(inode, lockstart,
2501 lockend - lockstart + 1);
2502 if (ret)
2503 return ret;
2504 }
2505 return 0;
2506}
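/*
 * Note: the loop above is a retry pattern - truncating the page cache and
 * taking the extent lock cannot be made atomic with respect to readers and
 * ordered IO, so if an ordered extent or a freshly read page shows up in
 * [lockstart, lockend] we unlock, wait for the ordered IO and try again.
 */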
2507
Filipe Manana0cbb5bd2020-09-08 11:27:24 +01002508static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002509 struct btrfs_inode *inode,
Filipe Manana690a5db2019-07-05 11:09:50 +01002510 struct btrfs_path *path,
Filipe Mananabf385642020-09-08 11:27:22 +01002511 struct btrfs_replace_extent_info *extent_info,
Filipe Manana2766ff62020-11-04 11:07:34 +00002512 const u64 replace_len,
2513 const u64 bytes_to_drop)
Filipe Manana690a5db2019-07-05 11:09:50 +01002514{
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002515 struct btrfs_fs_info *fs_info = trans->fs_info;
2516 struct btrfs_root *root = inode->root;
Filipe Manana690a5db2019-07-05 11:09:50 +01002517 struct btrfs_file_extent_item *extent;
2518 struct extent_buffer *leaf;
2519 struct btrfs_key key;
2520 int slot;
2521 struct btrfs_ref ref = { 0 };
Filipe Manana690a5db2019-07-05 11:09:50 +01002522 int ret;
2523
Filipe Mananabf385642020-09-08 11:27:22 +01002524 if (replace_len == 0)
Filipe Manana690a5db2019-07-05 11:09:50 +01002525 return 0;
2526
Filipe Mananabf385642020-09-08 11:27:22 +01002527 if (extent_info->disk_offset == 0 &&
Filipe Manana2766ff62020-11-04 11:07:34 +00002528 btrfs_fs_incompat(fs_info, NO_HOLES)) {
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002529 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
Filipe Manana690a5db2019-07-05 11:09:50 +01002530 return 0;
Filipe Manana2766ff62020-11-04 11:07:34 +00002531 }
Filipe Manana690a5db2019-07-05 11:09:50 +01002532
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002533 key.objectid = btrfs_ino(inode);
Filipe Manana690a5db2019-07-05 11:09:50 +01002534 key.type = BTRFS_EXTENT_DATA_KEY;
Filipe Mananabf385642020-09-08 11:27:22 +01002535 key.offset = extent_info->file_offset;
Filipe Manana690a5db2019-07-05 11:09:50 +01002536 ret = btrfs_insert_empty_item(trans, root, path, &key,
Filipe Mananafb870f62020-09-08 11:27:21 +01002537 sizeof(struct btrfs_file_extent_item));
Filipe Manana690a5db2019-07-05 11:09:50 +01002538 if (ret)
2539 return ret;
2540 leaf = path->nodes[0];
2541 slot = path->slots[0];
Filipe Mananabf385642020-09-08 11:27:22 +01002542 write_extent_buffer(leaf, extent_info->extent_buf,
Filipe Manana690a5db2019-07-05 11:09:50 +01002543 btrfs_item_ptr_offset(leaf, slot),
Filipe Mananafb870f62020-09-08 11:27:21 +01002544 sizeof(struct btrfs_file_extent_item));
Filipe Manana690a5db2019-07-05 11:09:50 +01002545 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
Filipe Mananafb870f62020-09-08 11:27:21 +01002546 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
Filipe Mananabf385642020-09-08 11:27:22 +01002547 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2548 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2549 if (extent_info->is_new_extent)
Filipe Manana8fccebf2020-09-08 11:27:20 +01002550 btrfs_set_file_extent_generation(leaf, extent, trans->transid);
Filipe Manana690a5db2019-07-05 11:09:50 +01002551 btrfs_mark_buffer_dirty(leaf);
2552 btrfs_release_path(path);
2553
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002554 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2555 replace_len);
Josef Bacik9ddc9592020-01-17 09:02:22 -05002556 if (ret)
2557 return ret;
2558
Filipe Manana690a5db2019-07-05 11:09:50 +01002559 /* If it's a hole, nothing more needs to be done. */
Filipe Manana2766ff62020-11-04 11:07:34 +00002560 if (extent_info->disk_offset == 0) {
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002561 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
Filipe Manana690a5db2019-07-05 11:09:50 +01002562 return 0;
Filipe Manana2766ff62020-11-04 11:07:34 +00002563 }
Filipe Manana690a5db2019-07-05 11:09:50 +01002564
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002565 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
Filipe Manana8fccebf2020-09-08 11:27:20 +01002566
Filipe Mananabf385642020-09-08 11:27:22 +01002567 if (extent_info->is_new_extent && extent_info->insertions == 0) {
2568 key.objectid = extent_info->disk_offset;
Filipe Manana8fccebf2020-09-08 11:27:20 +01002569 key.type = BTRFS_EXTENT_ITEM_KEY;
Filipe Mananabf385642020-09-08 11:27:22 +01002570 key.offset = extent_info->disk_len;
Filipe Manana8fccebf2020-09-08 11:27:20 +01002571 ret = btrfs_alloc_reserved_file_extent(trans, root,
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002572 btrfs_ino(inode),
Filipe Mananabf385642020-09-08 11:27:22 +01002573 extent_info->file_offset,
2574 extent_info->qgroup_reserved,
Filipe Manana8fccebf2020-09-08 11:27:20 +01002575 &key);
2576 } else {
2577 u64 ref_offset;
2578
2579 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
Filipe Mananabf385642020-09-08 11:27:22 +01002580 extent_info->disk_offset,
2581 extent_info->disk_len, 0);
2582 ref_offset = extent_info->file_offset - extent_info->data_offset;
Filipe Manana8fccebf2020-09-08 11:27:20 +01002583 btrfs_init_data_ref(&ref, root->root_key.objectid,
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002584 btrfs_ino(inode), ref_offset);
Filipe Manana8fccebf2020-09-08 11:27:20 +01002585 ret = btrfs_inc_extent_ref(trans, &ref);
2586 }
2587
Filipe Mananabf385642020-09-08 11:27:22 +01002588 extent_info->insertions++;
Filipe Manana690a5db2019-07-05 11:09:50 +01002589
2590 return ret;
2591}
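/*
 * Design note: for a brand-new extent being inserted for the first time
 * (is_new_extent && insertions == 0) the reserved extent item is created
 * directly; every other insertion only adds a delayed data ref, since the
 * extent item already exists and merely gains one more file extent
 * reference.
 */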
2592
Filipe Manana9cba40a2019-06-28 23:11:26 +01002593/*
2594 * The respective range must have been previously locked, as well as the inode.
2595 * The end offset is inclusive (last byte of the range).
Filipe Mananabf385642020-09-08 11:27:22 +01002596 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2597 * the file range with an extent.
2598 * When not punching a hole, we don't want to end up in a state where we dropped
 2599 * extents without inserting a new one, so we must abort the transaction to
 2600 * avoid corruption.
Filipe Manana9cba40a2019-06-28 23:11:26 +01002601 */
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002602int btrfs_replace_file_extents(struct btrfs_inode *inode,
2603 struct btrfs_path *path, const u64 start,
2604 const u64 end,
2605 struct btrfs_replace_extent_info *extent_info,
2606 struct btrfs_trans_handle **trans_out)
Filipe Manana9cba40a2019-06-28 23:11:26 +01002607{
Filipe Manana5893dfb2020-11-04 11:07:32 +00002608 struct btrfs_drop_extents_args drop_args = { 0 };
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002609 struct btrfs_root *root = inode->root;
2610 struct btrfs_fs_info *fs_info = root->fs_info;
Josef Bacik2bd36e72019-08-22 15:14:33 -04002611 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002612 u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
Filipe Manana9cba40a2019-06-28 23:11:26 +01002613 struct btrfs_trans_handle *trans = NULL;
2614 struct btrfs_block_rsv *rsv;
2615 unsigned int rsv_count;
2616 u64 cur_offset;
Filipe Manana9cba40a2019-06-28 23:11:26 +01002617 u64 len = end - start;
2618 int ret = 0;
2619
2620 if (end <= start)
2621 return -EINVAL;
2622
2623 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2624 if (!rsv) {
2625 ret = -ENOMEM;
2626 goto out;
2627 }
Josef Bacik2bd36e72019-08-22 15:14:33 -04002628 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
Filipe Manana9cba40a2019-06-28 23:11:26 +01002629 rsv->failfast = 1;
2630
2631 /*
2632 * 1 - update the inode
2633 * 1 - removing the extents in the range
Filipe Mananabf385642020-09-08 11:27:22 +01002634 * 1 - adding the hole extent if no_holes isn't set or if we are
2635 * replacing the range with a new extent
Filipe Manana9cba40a2019-06-28 23:11:26 +01002636 */
Filipe Mananabf385642020-09-08 11:27:22 +01002637 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
Filipe Manana690a5db2019-07-05 11:09:50 +01002638 rsv_count = 3;
2639 else
2640 rsv_count = 2;
2641
Filipe Manana9cba40a2019-06-28 23:11:26 +01002642 trans = btrfs_start_transaction(root, rsv_count);
2643 if (IS_ERR(trans)) {
2644 ret = PTR_ERR(trans);
2645 trans = NULL;
2646 goto out_free;
2647 }
2648
2649 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2650 min_size, false);
2651 BUG_ON(ret);
2652 trans->block_rsv = rsv;
2653
2654 cur_offset = start;
Filipe Manana5893dfb2020-11-04 11:07:32 +00002655 drop_args.path = path;
2656 drop_args.end = end + 1;
2657 drop_args.drop_cache = true;
Filipe Manana9cba40a2019-06-28 23:11:26 +01002658 while (cur_offset < end) {
Filipe Manana5893dfb2020-11-04 11:07:32 +00002659 drop_args.start = cur_offset;
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002660 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
Filipe Manana2766ff62020-11-04 11:07:34 +00002661 /* If we are punching a hole, decrement the inode's byte count */
2662 if (!extent_info)
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002663 btrfs_update_inode_bytes(inode, 0,
Filipe Manana2766ff62020-11-04 11:07:34 +00002664 drop_args.bytes_found);
Filipe Manana690a5db2019-07-05 11:09:50 +01002665 if (ret != -ENOSPC) {
2666 /*
2667 * When cloning we want to avoid transaction aborts when
2668 * nothing was done and we are attempting to clone parts
2669 * of inline extents, in such cases -EOPNOTSUPP is
2670 * returned by __btrfs_drop_extents() without having
2671 * changed anything in the file.
2672 */
Filipe Mananabf385642020-09-08 11:27:22 +01002673 if (extent_info && !extent_info->is_new_extent &&
Filipe Manana8fccebf2020-09-08 11:27:20 +01002674 ret && ret != -EOPNOTSUPP)
Filipe Manana690a5db2019-07-05 11:09:50 +01002675 btrfs_abort_transaction(trans, ret);
Filipe Manana9cba40a2019-06-28 23:11:26 +01002676 break;
Filipe Manana690a5db2019-07-05 11:09:50 +01002677 }
Filipe Manana9cba40a2019-06-28 23:11:26 +01002678
2679 trans->block_rsv = &fs_info->trans_block_rsv;
2680
Filipe Manana5893dfb2020-11-04 11:07:32 +00002681 if (!extent_info && cur_offset < drop_args.drop_end &&
Filipe Manana690a5db2019-07-05 11:09:50 +01002682 cur_offset < ino_size) {
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002683 ret = fill_holes(trans, inode, path, cur_offset,
2684 drop_args.drop_end);
Filipe Manana9cba40a2019-06-28 23:11:26 +01002685 if (ret) {
2686 /*
2687 * If we failed then we didn't insert our hole
2688 * entries for the area we dropped, so now the
2689 * fs is corrupted, so we must abort the
2690 * transaction.
2691 */
2692 btrfs_abort_transaction(trans, ret);
2693 break;
2694 }
Filipe Manana5893dfb2020-11-04 11:07:32 +00002695 } else if (!extent_info && cur_offset < drop_args.drop_end) {
Josef Bacik9ddc9592020-01-17 09:02:22 -05002696 /*
2697 * We are past the i_size here, but since we didn't
2698 * insert holes we need to clear the mapped area so we
2699 * know to not set disk_i_size in this area until a new
2700 * file extent is inserted here.
2701 */
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002702 ret = btrfs_inode_clear_file_extent_range(inode,
Filipe Manana5893dfb2020-11-04 11:07:32 +00002703 cur_offset,
2704 drop_args.drop_end - cur_offset);
Josef Bacik9ddc9592020-01-17 09:02:22 -05002705 if (ret) {
2706 /*
2707 * We couldn't clear our area, so we could
2708 * presumably adjust up and corrupt the fs, so
2709 * we need to abort.
2710 */
2711 btrfs_abort_transaction(trans, ret);
2712 break;
2713 }
Filipe Manana9cba40a2019-06-28 23:11:26 +01002714 }
2715
Filipe Manana5893dfb2020-11-04 11:07:32 +00002716 if (extent_info &&
2717 drop_args.drop_end > extent_info->file_offset) {
2718 u64 replace_len = drop_args.drop_end -
2719 extent_info->file_offset;
Filipe Manana690a5db2019-07-05 11:09:50 +01002720
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002721 ret = btrfs_insert_replace_extent(trans, inode, path,
2722 extent_info, replace_len,
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002723 drop_args.bytes_found);
Filipe Manana690a5db2019-07-05 11:09:50 +01002724 if (ret) {
2725 btrfs_abort_transaction(trans, ret);
2726 break;
2727 }
Filipe Mananabf385642020-09-08 11:27:22 +01002728 extent_info->data_len -= replace_len;
2729 extent_info->data_offset += replace_len;
2730 extent_info->file_offset += replace_len;
Filipe Manana690a5db2019-07-05 11:09:50 +01002731 }
2732
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002733 ret = btrfs_update_inode(trans, root, inode);
Filipe Manana9cba40a2019-06-28 23:11:26 +01002734 if (ret)
2735 break;
2736
2737 btrfs_end_transaction(trans);
2738 btrfs_btree_balance_dirty(fs_info);
2739
2740 trans = btrfs_start_transaction(root, rsv_count);
2741 if (IS_ERR(trans)) {
2742 ret = PTR_ERR(trans);
2743 trans = NULL;
2744 break;
2745 }
2746
2747 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2748 rsv, min_size, false);
2749 BUG_ON(ret); /* shouldn't happen */
2750 trans->block_rsv = rsv;
2751
BingJing Chang32277882021-03-25 09:56:22 +08002752 cur_offset = drop_args.drop_end;
2753 len = end - cur_offset;
2754 if (!extent_info && len) {
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002755 ret = find_first_non_hole(inode, &cur_offset, &len);
Filipe Manana690a5db2019-07-05 11:09:50 +01002756 if (unlikely(ret < 0))
2757 break;
2758 if (ret && !len) {
2759 ret = 0;
2760 break;
2761 }
Filipe Manana9cba40a2019-06-28 23:11:26 +01002762 }
2763 }
2764
Filipe Manana690a5db2019-07-05 11:09:50 +01002765 /*
2766 * If we were cloning, force the next fsync to be a full one since we
 2767 * replaced (or just dropped in the case of cloning holes when
Filipe Mananae2b84212021-03-26 13:14:41 +00002768 * NO_HOLES is enabled) file extent items and did not set up new extent
2769 * maps for the replacement extents (or holes).
Filipe Manana690a5db2019-07-05 11:09:50 +01002770 */
Filipe Mananabf385642020-09-08 11:27:22 +01002771 if (extent_info && !extent_info->is_new_extent)
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002772 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
Filipe Manana690a5db2019-07-05 11:09:50 +01002773
Filipe Manana9cba40a2019-06-28 23:11:26 +01002774 if (ret)
2775 goto out_trans;
2776
2777 trans->block_rsv = &fs_info->trans_block_rsv;
2778 /*
 2779 * If we are using the NO_HOLES feature we might have already had a
 2780 * hole that overlaps part of the region [lockstart, lockend] and
2781 * ends at (or beyond) lockend. Since we have no file extent items to
2782 * represent holes, drop_end can be less than lockend and so we must
2783 * make sure we have an extent map representing the existing hole (the
2784 * call to __btrfs_drop_extents() might have dropped the existing extent
2785 * map representing the existing hole), otherwise the fast fsync path
2786 * will not record the existence of the hole region
2787 * [existing_hole_start, lockend].
2788 */
Filipe Manana5893dfb2020-11-04 11:07:32 +00002789 if (drop_args.drop_end <= end)
2790 drop_args.drop_end = end + 1;
Filipe Manana9cba40a2019-06-28 23:11:26 +01002791 /*
2792 * Don't insert file hole extent item if it's for a range beyond eof
 2793 * (because it's useless) or if it represents a 0-byte range (when
2794 * cur_offset == drop_end).
2795 */
Filipe Manana5893dfb2020-11-04 11:07:32 +00002796 if (!extent_info && cur_offset < ino_size &&
2797 cur_offset < drop_args.drop_end) {
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002798 ret = fill_holes(trans, inode, path, cur_offset,
2799 drop_args.drop_end);
Filipe Manana9cba40a2019-06-28 23:11:26 +01002800 if (ret) {
2801 /* Same comment as above. */
2802 btrfs_abort_transaction(trans, ret);
2803 goto out_trans;
2804 }
Filipe Manana5893dfb2020-11-04 11:07:32 +00002805 } else if (!extent_info && cur_offset < drop_args.drop_end) {
Josef Bacik9ddc9592020-01-17 09:02:22 -05002806 /* See the comment in the loop above for the reasoning here. */
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002807 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2808 drop_args.drop_end - cur_offset);
Josef Bacik9ddc9592020-01-17 09:02:22 -05002809 if (ret) {
2810 btrfs_abort_transaction(trans, ret);
2811 goto out_trans;
2812 }
2813
Filipe Manana9cba40a2019-06-28 23:11:26 +01002814 }
Filipe Mananabf385642020-09-08 11:27:22 +01002815 if (extent_info) {
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002816 ret = btrfs_insert_replace_extent(trans, inode, path,
Nikolay Borisov03fcb1a2020-11-02 16:49:02 +02002817 extent_info, extent_info->data_len,
2818 drop_args.bytes_found);
Filipe Manana690a5db2019-07-05 11:09:50 +01002819 if (ret) {
2820 btrfs_abort_transaction(trans, ret);
2821 goto out_trans;
2822 }
2823 }
Filipe Manana9cba40a2019-06-28 23:11:26 +01002824
2825out_trans:
2826 if (!trans)
2827 goto out_free;
2828
2829 trans->block_rsv = &fs_info->trans_block_rsv;
2830 if (ret)
2831 btrfs_end_transaction(trans);
2832 else
2833 *trans_out = trans;
2834out_free:
2835 btrfs_free_block_rsv(fs_info, rsv);
2836out:
2837 return ret;
2838}
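/*
 * Note on the loop structure above: btrfs_drop_extents() returning -ENOSPC
 * signals that the current transaction's reservation is exhausted rather
 * than a hard failure, so the function ends the transaction, starts a
 * fresh one with a refilled block reserve and continues dropping from
 * drop_args.drop_end. This keeps huge punch hole/clone ranges from
 * pinning one enormous transaction.
 */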
2839
Josef Bacik2aaa6652012-08-29 14:27:18 -04002840static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2841{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002842 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002843 struct btrfs_root *root = BTRFS_I(inode)->root;
2844 struct extent_state *cached_state = NULL;
2845 struct btrfs_path *path;
Filipe Manana9cba40a2019-06-28 23:11:26 +01002846 struct btrfs_trans_handle *trans = NULL;
Qu Wenruod7781542014-05-30 15:16:10 +08002847 u64 lockstart;
2848 u64 lockend;
2849 u64 tail_start;
2850 u64 tail_len;
2851 u64 orig_start = offset;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002852 int ret = 0;
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302853 bool same_block;
Filipe Mananaa1a50f62014-04-26 01:35:31 +01002854 u64 ino_size;
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302855 bool truncated_block = false;
Filipe Mananae8c1c762015-02-15 22:38:54 +00002856 bool updated_inode = false;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002857
Josef Bacik0ef8b722013-10-25 16:13:35 -04002858 ret = btrfs_wait_ordered_range(inode, offset, len);
2859 if (ret)
2860 return ret;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002861
Josef Bacik8d9b4a12021-02-10 17:14:36 -05002862 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002863 ino_size = round_up(inode->i_size, fs_info->sectorsize);
Nikolay Borisovdea46d82020-11-02 16:49:01 +02002864 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
Qu Wenruod7781542014-05-30 15:16:10 +08002865 if (ret < 0)
2866 goto out_only_mutex;
2867 if (ret && !len) {
2868 /* Already in a large hole */
2869 ret = 0;
2870 goto out_only_mutex;
2871 }
2872
Nikolay Borisov6fee2482020-08-31 14:42:42 +03002873 lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
Qu Wenruod7781542014-05-30 15:16:10 +08002874 lockend = round_down(offset + len,
Nikolay Borisov6fee2482020-08-31 14:42:42 +03002875 btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002876 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2877 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
Miao Xie7426cc02012-12-05 10:54:52 +00002878 /*
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302879 * We needn't truncate any block which is beyond the end of the file
Miao Xie7426cc02012-12-05 10:54:52 +00002880 * because we are sure there is no data there.
2881 */
Josef Bacik2aaa6652012-08-29 14:27:18 -04002882 /*
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302883 * Only do this if we are in the same block and we aren't doing the
2884 * entire block.
Josef Bacik2aaa6652012-08-29 14:27:18 -04002885 */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002886 if (same_block && len < fs_info->sectorsize) {
Filipe Mananae8c1c762015-02-15 22:38:54 +00002887 if (offset < ino_size) {
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302888 truncated_block = true;
Nikolay Borisov217f42e2020-11-02 16:49:03 +02002889 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2890 0);
Filipe Mananae8c1c762015-02-15 22:38:54 +00002891 } else {
2892 ret = 0;
2893 }
Qu Wenruod7781542014-05-30 15:16:10 +08002894 goto out_only_mutex;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002895 }
2896
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302897 /* zero back part of the first block */
Filipe Manana12870f12014-02-15 15:55:58 +00002898 if (offset < ino_size) {
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302899 truncated_block = true;
Nikolay Borisov217f42e2020-11-02 16:49:03 +02002900 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
Miao Xie7426cc02012-12-05 10:54:52 +00002901 if (ret) {
Josef Bacik8d9b4a12021-02-10 17:14:36 -05002902 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
Miao Xie7426cc02012-12-05 10:54:52 +00002903 return ret;
2904 }
Josef Bacik2aaa6652012-08-29 14:27:18 -04002905 }
2906
Qu Wenruod7781542014-05-30 15:16:10 +08002907 /* Check the aligned pages after the first unaligned page.
 2908 * If offset != orig_start, the first unaligned page and
Nicholas D Steeves01327612016-05-19 21:18:45 -04002909 * several following pages are already in holes, so the
Qu Wenruod7781542014-05-30 15:16:10 +08002910 * extra check can be skipped. */
2911 if (offset == orig_start) {
2912 /* after truncate page, check hole again */
2913 len = offset + len - lockstart;
2914 offset = lockstart;
Nikolay Borisovdea46d82020-11-02 16:49:01 +02002915 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
Qu Wenruod7781542014-05-30 15:16:10 +08002916 if (ret < 0)
2917 goto out_only_mutex;
2918 if (ret && !len) {
2919 ret = 0;
2920 goto out_only_mutex;
2921 }
2922 lockstart = offset;
2923 }
2924
2925 /* Check the tail unaligned part is in a hole */
2926 tail_start = lockend + 1;
2927 tail_len = offset + len - tail_start;
2928 if (tail_len) {
Nikolay Borisovdea46d82020-11-02 16:49:01 +02002929 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
Qu Wenruod7781542014-05-30 15:16:10 +08002930 if (unlikely(ret < 0))
2931 goto out_only_mutex;
2932 if (!ret) {
2933 /* zero the front end of the last page */
2934 if (tail_start + tail_len < ino_size) {
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302935 truncated_block = true;
Nikolay Borisov217f42e2020-11-02 16:49:03 +02002936 ret = btrfs_truncate_block(BTRFS_I(inode),
Chandan Rajendra9703fef2016-01-21 15:55:56 +05302937 tail_start + tail_len,
2938 0, 1);
Qu Wenruod7781542014-05-30 15:16:10 +08002939 if (ret)
2940 goto out_only_mutex;
Qu Wenruo51f395a2014-08-08 13:06:20 +08002941 }
Miao Xie00612802012-12-05 10:54:12 +00002942 }
Josef Bacik2aaa6652012-08-29 14:27:18 -04002943 }
2944
2945 if (lockend < lockstart) {
Filipe Mananae8c1c762015-02-15 22:38:54 +00002946 ret = 0;
2947 goto out_only_mutex;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002948 }
2949
Filipe Mananaf27451f2017-10-25 11:55:28 +01002950 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2951 &cached_state);
Josef Bacik8fca9552019-05-03 11:10:06 -04002952 if (ret)
Filipe Mananaf27451f2017-10-25 11:55:28 +01002953 goto out_only_mutex;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002954
2955 path = btrfs_alloc_path();
2956 if (!path) {
2957 ret = -ENOMEM;
2958 goto out;
2959 }
2960
Nikolay Borisovbfc78472021-02-17 15:12:47 +02002961 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2962 lockend, NULL, &trans);
Filipe Manana9cba40a2019-06-28 23:11:26 +01002963 btrfs_free_path(path);
2964 if (ret)
2965 goto out;
Josef Bacik2aaa6652012-08-29 14:27:18 -04002966
Filipe Manana9cba40a2019-06-28 23:11:26 +01002967 ASSERT(trans != NULL);
Tsutomu Itohe1f57902012-11-08 04:47:33 +00002968 inode_inc_iversion(inode);
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002969 inode->i_mtime = inode->i_ctime = current_time(inode);
Nikolay Borisov9a56fcd2020-11-02 16:48:59 +02002970 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
Filipe Mananae8c1c762015-02-15 22:38:54 +00002971 updated_inode = true;
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04002972 btrfs_end_transaction(trans);
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04002973 btrfs_btree_balance_dirty(fs_info);
Josef Bacik2aaa6652012-08-29 14:27:18 -04002974out:
2975 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
David Sterbae43bbe52017-12-12 21:43:52 +01002976 &cached_state);
Qu Wenruod7781542014-05-30 15:16:10 +08002977out_only_mutex:
Filipe Manana9cba40a2019-06-28 23:11:26 +01002978 if (!updated_inode && truncated_block && !ret) {
Filipe Mananae8c1c762015-02-15 22:38:54 +00002979 /*
2980 * If we only end up zeroing part of a page, we still need to
2981 * update the inode item, so that all the time fields are
2982 * updated as well as the necessary btrfs inode in memory fields
2983 * for detecting, at fsync time, if the inode isn't yet in the
2984 * log tree or it's there but not up to date.
2985 */
Filipe Manana17900662019-06-19 13:05:50 +01002986 struct timespec64 now = current_time(inode);
2987
2988 inode_inc_iversion(inode);
2989 inode->i_mtime = now;
2990 inode->i_ctime = now;
Filipe Mananae8c1c762015-02-15 22:38:54 +00002991 trans = btrfs_start_transaction(root, 1);
2992 if (IS_ERR(trans)) {
Filipe Manana9cba40a2019-06-28 23:11:26 +01002993 ret = PTR_ERR(trans);
Filipe Mananae8c1c762015-02-15 22:38:54 +00002994 } else {
Filipe Manana9cba40a2019-06-28 23:11:26 +01002995 int ret2;
2996
Nikolay Borisov9a56fcd2020-11-02 16:48:59 +02002997 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
Filipe Manana9cba40a2019-06-28 23:11:26 +01002998 ret2 = btrfs_end_transaction(trans);
2999 if (!ret)
3000 ret = ret2;
Filipe Mananae8c1c762015-02-15 22:38:54 +00003001 }
3002 }
Josef Bacik8d9b4a12021-02-10 17:14:36 -05003003 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
Filipe Manana9cba40a2019-06-28 23:11:26 +01003004 return ret;
Josef Bacik2aaa6652012-08-29 14:27:18 -04003005}
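/*
 * Alignment example (illustrative, 4K sectors): punching offset == 3000,
 * len == 7000 yields lockstart == round_up(3000, 4096) == 4096 and
 * lockend == round_down(10000, 4096) - 1 == 8191. Only [4096, 8191] is
 * handled by dropping extents; the partial sectors [3000, 4095] and
 * [8192, 9999] are zeroed via btrfs_truncate_block() instead.
 */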
3006
Qu Wenruo14524a82015-09-08 17:22:44 +08003007/* Helper structure to record which range is already reserved */
3008struct falloc_range {
3009 struct list_head list;
3010 u64 start;
3011 u64 len;
3012};
3013
3014/*
3015 * Helper function to add falloc range
3016 *
 3017 * The caller should have locked the larger extent range containing
 3018 * [start, len).
3019 */
3020static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3021{
3022 struct falloc_range *prev = NULL;
3023 struct falloc_range *range = NULL;
3024
3025 if (list_empty(head))
3026 goto insert;
3027
3028 /*
 3029 * As fallocate iterates in bytenr order, we only need to check
3030 * the last range.
3031 */
3032 prev = list_entry(head->prev, struct falloc_range, list);
3033 if (prev->start + prev->len == start) {
3034 prev->len += len;
3035 return 0;
3036 }
3037insert:
David Sterba32fc9322016-02-11 14:25:38 +01003038 range = kmalloc(sizeof(*range), GFP_KERNEL);
Qu Wenruo14524a82015-09-08 17:22:44 +08003039 if (!range)
3040 return -ENOMEM;
3041 range->start = start;
3042 range->len = len;
3043 list_add_tail(&range->list, head);
3044 return 0;
3045}
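/*
 * Illustrative example: adding [0, 4K) and then [4K, 8K) leaves a single
 * list entry covering [0, 8K), because the second call merely extends
 * prev->len instead of allocating a new range.
 */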
3046
Filipe Mananaf27451f2017-10-25 11:55:28 +01003047static int btrfs_fallocate_update_isize(struct inode *inode,
3048 const u64 end,
3049 const int mode)
3050{
3051 struct btrfs_trans_handle *trans;
3052 struct btrfs_root *root = BTRFS_I(inode)->root;
3053 int ret;
3054 int ret2;
3055
3056 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3057 return 0;
3058
3059 trans = btrfs_start_transaction(root, 1);
3060 if (IS_ERR(trans))
3061 return PTR_ERR(trans);
3062
3063 inode->i_ctime = current_time(inode);
3064 i_size_write(inode, end);
Nikolay Borisov76aea532020-11-02 16:48:53 +02003065 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
Nikolay Borisov9a56fcd2020-11-02 16:48:59 +02003066 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
Filipe Mananaf27451f2017-10-25 11:55:28 +01003067 ret2 = btrfs_end_transaction(trans);
3068
3069 return ret ? ret : ret2;
3070}
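/*
 * Note: the first error wins - ret2 from ending the transaction is only
 * reported when the inode update itself succeeded.
 */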
3071
Filipe Manana81fdf632018-01-18 11:34:31 +00003072enum {
David Sterbaf262fa82019-06-18 20:00:08 +02003073 RANGE_BOUNDARY_WRITTEN_EXTENT,
3074 RANGE_BOUNDARY_PREALLOC_EXTENT,
3075 RANGE_BOUNDARY_HOLE,
Filipe Manana81fdf632018-01-18 11:34:31 +00003076};
3077
Nikolay Borisov948dfeb2020-08-31 14:42:48 +03003078static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
Filipe Mananaf27451f2017-10-25 11:55:28 +01003079 u64 offset)
3080{
Nikolay Borisov948dfeb2020-08-31 14:42:48 +03003081 const u64 sectorsize = btrfs_inode_sectorsize(inode);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003082 struct extent_map *em;
Filipe Manana81fdf632018-01-18 11:34:31 +00003083 int ret;
Filipe Mananaf27451f2017-10-25 11:55:28 +01003084
3085 offset = round_down(offset, sectorsize);
Nikolay Borisov948dfeb2020-08-31 14:42:48 +03003086 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003087 if (IS_ERR(em))
3088 return PTR_ERR(em);
3089
3090 if (em->block_start == EXTENT_MAP_HOLE)
Filipe Manana81fdf632018-01-18 11:34:31 +00003091 ret = RANGE_BOUNDARY_HOLE;
3092 else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3093 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3094 else
3095 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
Filipe Mananaf27451f2017-10-25 11:55:28 +01003096
3097 free_extent_map(em);
3098 return ret;
3099}
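/*
 * The tri-state result drives btrfs_zero_range(): a WRITTEN extent at an
 * unaligned boundary is partially zeroed in place, a HOLE widens the
 * allocation range so the boundary block is covered by the new prealloc
 * extent, and a PREALLOC extent needs no work at all since reading it
 * already returns zeroes.
 */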
3100
3101static int btrfs_zero_range(struct inode *inode,
3102 loff_t offset,
3103 loff_t len,
3104 const int mode)
3105{
3106 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3107 struct extent_map *em;
3108 struct extent_changeset *data_reserved = NULL;
3109 int ret;
3110 u64 alloc_hint = 0;
Nikolay Borisov6fee2482020-08-31 14:42:42 +03003111 const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
Filipe Mananaf27451f2017-10-25 11:55:28 +01003112 u64 alloc_start = round_down(offset, sectorsize);
3113 u64 alloc_end = round_up(offset + len, sectorsize);
3114 u64 bytes_to_reserve = 0;
3115 bool space_reserved = false;
3116
3117 inode_dio_wait(inode);
3118
Omar Sandoval39b07b52019-12-02 17:34:23 -08003119 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3120 alloc_end - alloc_start);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003121 if (IS_ERR(em)) {
3122 ret = PTR_ERR(em);
3123 goto out;
3124 }
3125
3126 /*
3127 * Avoid hole punching and extent allocation for some cases. More cases
 3128 * could be considered, but those are unlikely to be common and we keep things
3129 * as simple as possible for now. Also, intentionally, if the target
3130 * range contains one or more prealloc extents together with regular
3131 * extents and holes, we drop all the existing extents and allocate a
3132 * new prealloc extent, so that we get a larger contiguous disk extent.
3133 */
3134 if (em->start <= alloc_start &&
3135 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3136 const u64 em_end = em->start + em->len;
3137
3138 if (em_end >= offset + len) {
3139 /*
3140 * The whole range is already a prealloc extent,
3141 * do nothing except updating the inode's i_size if
3142 * needed.
3143 */
3144 free_extent_map(em);
3145 ret = btrfs_fallocate_update_isize(inode, offset + len,
3146 mode);
3147 goto out;
3148 }
3149 /*
3150 * Part of the range is already a prealloc extent, so operate
3151 * only on the remaining part of the range.
3152 */
3153 alloc_start = em_end;
3154 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3155 len = offset + len - alloc_start;
3156 offset = alloc_start;
3157 alloc_hint = em->block_start + em->len;
3158 }
3159 free_extent_map(em);
3160
3161 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3162 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
Omar Sandoval39b07b52019-12-02 17:34:23 -08003163 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3164 sectorsize);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003165 if (IS_ERR(em)) {
3166 ret = PTR_ERR(em);
3167 goto out;
3168 }
3169
3170 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3171 free_extent_map(em);
3172 ret = btrfs_fallocate_update_isize(inode, offset + len,
3173 mode);
3174 goto out;
3175 }
3176 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3177 free_extent_map(em);
Nikolay Borisov217f42e2020-11-02 16:49:03 +02003178 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3179 0);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003180 if (!ret)
3181 ret = btrfs_fallocate_update_isize(inode,
3182 offset + len,
3183 mode);
3184 return ret;
3185 }
3186 free_extent_map(em);
3187 alloc_start = round_down(offset, sectorsize);
3188 alloc_end = alloc_start + sectorsize;
3189 goto reserve_space;
3190 }
3191
3192 alloc_start = round_up(offset, sectorsize);
3193 alloc_end = round_down(offset + len, sectorsize);
3194
3195 /*
 3196 * For unaligned ranges, check the pages at the boundaries; they might
3197 * map to an extent, in which case we need to partially zero them, or
3198 * they might map to a hole, in which case we need our allocation range
3199 * to cover them.
3200 */
3201 if (!IS_ALIGNED(offset, sectorsize)) {
Nikolay Borisov948dfeb2020-08-31 14:42:48 +03003202 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3203 offset);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003204 if (ret < 0)
3205 goto out;
Filipe Manana81fdf632018-01-18 11:34:31 +00003206 if (ret == RANGE_BOUNDARY_HOLE) {
Filipe Mananaf27451f2017-10-25 11:55:28 +01003207 alloc_start = round_down(offset, sectorsize);
3208 ret = 0;
Filipe Manana81fdf632018-01-18 11:34:31 +00003209 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
Nikolay Borisov217f42e2020-11-02 16:49:03 +02003210 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003211 if (ret)
3212 goto out;
Filipe Manana81fdf632018-01-18 11:34:31 +00003213 } else {
3214 ret = 0;
Filipe Mananaf27451f2017-10-25 11:55:28 +01003215 }
3216 }
3217
3218 if (!IS_ALIGNED(offset + len, sectorsize)) {
Nikolay Borisov948dfeb2020-08-31 14:42:48 +03003219 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
Filipe Mananaf27451f2017-10-25 11:55:28 +01003220 offset + len);
3221 if (ret < 0)
3222 goto out;
Filipe Manana81fdf632018-01-18 11:34:31 +00003223 if (ret == RANGE_BOUNDARY_HOLE) {
Filipe Mananaf27451f2017-10-25 11:55:28 +01003224 alloc_end = round_up(offset + len, sectorsize);
3225 ret = 0;
Filipe Manana81fdf632018-01-18 11:34:31 +00003226 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
Nikolay Borisov217f42e2020-11-02 16:49:03 +02003227 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3228 0, 1);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003229 if (ret)
3230 goto out;
Filipe Manana81fdf632018-01-18 11:34:31 +00003231 } else {
3232 ret = 0;
Filipe Mananaf27451f2017-10-25 11:55:28 +01003233 }
3234 }
3235
3236reserve_space:
3237 if (alloc_start < alloc_end) {
3238 struct extent_state *cached_state = NULL;
3239 const u64 lockstart = alloc_start;
3240 const u64 lockend = alloc_end - 1;
3241
3242 bytes_to_reserve = alloc_end - alloc_start;
3243 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3244 bytes_to_reserve);
3245 if (ret < 0)
3246 goto out;
3247 space_reserved = true;
Filipe Mananaf27451f2017-10-25 11:55:28 +01003248 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3249 &cached_state);
3250 if (ret)
3251 goto out;
Nikolay Borisov7661a3e2020-06-03 08:55:37 +03003252 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
Qu Wenruoa7f8b1c2020-06-10 09:04:42 +08003253 alloc_start, bytes_to_reserve);
Nikolay Borisov4f6a49d2021-02-23 15:20:42 +02003254 if (ret) {
3255 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3256 lockend, &cached_state);
Qu Wenruoa7f8b1c2020-06-10 09:04:42 +08003257 goto out;
Nikolay Borisov4f6a49d2021-02-23 15:20:42 +02003258 }
Filipe Mananaf27451f2017-10-25 11:55:28 +01003259 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3260 alloc_end - alloc_start,
3261 i_blocksize(inode),
3262 offset + len, &alloc_hint);
3263 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3264 lockend, &cached_state);
3265 /* btrfs_prealloc_file_range releases reserved space on error */
Filipe Manana9f13ce72018-01-18 11:34:20 +00003266 if (ret) {
Filipe Mananaf27451f2017-10-25 11:55:28 +01003267 space_reserved = false;
Filipe Manana9f13ce72018-01-18 11:34:20 +00003268 goto out;
3269 }
Filipe Mananaf27451f2017-10-25 11:55:28 +01003270 }
Filipe Manana9f13ce72018-01-18 11:34:20 +00003271 ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003272 out:
3273 if (ret && space_reserved)
Nikolay Borisov25ce28c2020-06-03 08:55:39 +03003274 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
Filipe Mananaf27451f2017-10-25 11:55:28 +01003275 alloc_start, bytes_to_reserve);
3276 extent_changeset_free(data_reserved);
3277
3278 return ret;
3279}
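/*
 * Note on the error path above: btrfs_prealloc_file_range() releases the
 * reserved data space itself when it fails, which is why space_reserved
 * is cleared before jumping to out - otherwise the cleanup at the out
 * label would free the same reservation a second time.
 */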
3280
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003281static long btrfs_fallocate(struct file *file, int mode,
3282 loff_t offset, loff_t len)
3283{
Al Viro496ad9a2013-01-23 17:07:38 -05003284 struct inode *inode = file_inode(file);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003285 struct extent_state *cached_state = NULL;
Qu Wenruo364ecf32017-02-27 15:10:38 +08003286 struct extent_changeset *data_reserved = NULL;
Qu Wenruo14524a82015-09-08 17:22:44 +08003287 struct falloc_range *range;
3288 struct falloc_range *tmp;
3289 struct list_head reserve_list;
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003290 u64 cur_offset;
3291 u64 last_byte;
3292 u64 alloc_start;
3293 u64 alloc_end;
3294 u64 alloc_hint = 0;
3295 u64 locked_end;
Qu Wenruo14524a82015-09-08 17:22:44 +08003296 u64 actual_end = 0;
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003297 struct extent_map *em;
Nikolay Borisov6fee2482020-08-31 14:42:42 +03003298 int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003299 int ret;
3300
Naohiro Aotaf1569c42020-11-10 20:26:12 +09003301 /* Do not allow fallocate in ZONED mode */
3302 if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3303 return -EOPNOTSUPP;
3304
Miao Xie797f4272012-11-28 10:28:07 +00003305 alloc_start = round_down(offset, blocksize);
3306 alloc_end = round_up(offset + len, blocksize);
Wang Xiaoguang18513092016-07-25 15:51:40 +08003307 cur_offset = alloc_start;
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003308
Josef Bacik2aaa6652012-08-29 14:27:18 -04003309 /* Make sure we aren't being given some crap mode */
Filipe Mananaf27451f2017-10-25 11:55:28 +01003310 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3311 FALLOC_FL_ZERO_RANGE))
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003312 return -EOPNOTSUPP;
3313
Josef Bacik2aaa6652012-08-29 14:27:18 -04003314 if (mode & FALLOC_FL_PUNCH_HOLE)
3315 return btrfs_punch_hole(inode, offset, len);
3316
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003317 /*
Qu Wenruo14524a82015-09-08 17:22:44 +08003318 * Only trigger disk allocation; don't trigger the qgroup reserve yet.
3319 *
3320 * For qgroup space, it will be checked later.
Chris Masond98456f2012-01-31 20:27:41 -05003321 */
Filipe Mananaf27451f2017-10-25 11:55:28 +01003322 if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3323 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3324 alloc_end - alloc_start);
3325 if (ret < 0)
3326 return ret;
3327 }
Chris Masond98456f2012-01-31 20:27:41 -05003328
Josef Bacik8d9b4a12021-02-10 17:14:36 -05003329 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
Davide Italiano2a162ce2015-04-06 22:09:15 -07003330
3331 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3332 ret = inode_newsize_ok(inode, offset + len);
3333 if (ret)
3334 goto out;
3335 }
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003336
Qu Wenruo14524a82015-09-08 17:22:44 +08003337 /*
 3338 * TODO: Move these two operations to after we have checked that the
 3339 * reserved space is accurate, or fallocate can still fail but
 3340 * with the page truncated or the size expanded.
 3341 *
 3342 * But that's a minor problem and won't do much harm.
3343 */
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003344 if (alloc_start > inode->i_size) {
Nikolay Borisovb06359a2020-11-02 16:49:04 +02003345 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
Josef Bacika41ad392011-01-31 15:30:16 -05003346 alloc_start);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003347 if (ret)
3348 goto out;
Qu Wenruo0f6925f2015-10-14 15:26:13 +08003349 } else if (offset + len > inode->i_size) {
Josef Bacika71754f2013-06-17 17:14:39 -04003350 /*
3351 * If we are fallocating from the end of the file onward we
Chandan Rajendra9703fef2016-01-21 15:55:56 +05303352 * need to zero out the end of the block if i_size lands in the
3353 * middle of a block.
Josef Bacika71754f2013-06-17 17:14:39 -04003354 */
Nikolay Borisov217f42e2020-11-02 16:49:03 +02003355 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
Josef Bacika71754f2013-06-17 17:14:39 -04003356 if (ret)
3357 goto out;
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003358 }
3359
Josef Bacika71754f2013-06-17 17:14:39 -04003360 /*
3361 * wait for ordered IO before we have any locks. We'll loop again
3362 * below with the locks held.
3363 */
Josef Bacik0ef8b722013-10-25 16:13:35 -04003364 ret = btrfs_wait_ordered_range(inode, alloc_start,
3365 alloc_end - alloc_start);
3366 if (ret)
3367 goto out;
Josef Bacika71754f2013-06-17 17:14:39 -04003368
Filipe Mananaf27451f2017-10-25 11:55:28 +01003369 if (mode & FALLOC_FL_ZERO_RANGE) {
3370 ret = btrfs_zero_range(inode, offset, len, mode);
Josef Bacik8d9b4a12021-02-10 17:14:36 -05003371 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
Filipe Mananaf27451f2017-10-25 11:55:28 +01003372 return ret;
3373 }
3374
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003375 locked_end = alloc_end - 1;
3376 while (1) {
3377 struct btrfs_ordered_extent *ordered;
3378
3379 /* the extent lock is ordered inside the running
3380 * transaction
3381 */
3382 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
David Sterbaff13db42015-12-03 14:30:40 +01003383 locked_end, &cached_state);
Nikolay Borisov6d072c82020-08-31 14:42:39 +03003384 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
3385 locked_end);
Nikolay Borisov96b09dd2017-11-01 11:36:05 +02003386
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003387 if (ordered &&
Omar Sandovalbffe6332019-12-02 17:34:19 -08003388 ordered->file_offset + ordered->num_bytes > alloc_start &&
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003389 ordered->file_offset < alloc_end) {
3390 btrfs_put_ordered_extent(ordered);
3391 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3392 alloc_start, locked_end,
David Sterbae43bbe52017-12-12 21:43:52 +01003393 &cached_state);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003394 /*
3395 * we can't wait on the range with the transaction
3396 * running or with the extent lock held
3397 */
Josef Bacik0ef8b722013-10-25 16:13:35 -04003398 ret = btrfs_wait_ordered_range(inode, alloc_start,
3399 alloc_end - alloc_start);
3400 if (ret)
3401 goto out;
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003402 } else {
3403 if (ordered)
3404 btrfs_put_ordered_extent(ordered);
3405 break;
3406 }
3407 }
3408
Qu Wenruo14524a82015-09-08 17:22:44 +08003409 /* First, check if we exceed the qgroup limit */
3410 INIT_LIST_HEAD(&reserve_list);
Nikolay Borisov6b7d6e92017-11-01 11:32:18 +02003411 while (cur_offset < alloc_end) {
Nikolay Borisovfc4f21b12017-02-20 13:51:06 +02003412 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
Omar Sandoval39b07b52019-12-02 17:34:23 -08003413 alloc_end - cur_offset);
Dan Carpenter99862772017-04-11 11:57:15 +03003414 if (IS_ERR(em)) {
3415 ret = PTR_ERR(em);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003416 break;
3417 }
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003418 last_byte = min(extent_map_end(em), alloc_end);
Josef Bacikf1e490a2011-08-18 10:36:39 -04003419 actual_end = min_t(u64, extent_map_end(em), offset + len);
Miao Xie797f4272012-11-28 10:28:07 +00003420 last_byte = ALIGN(last_byte, blocksize);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003421 if (em->block_start == EXTENT_MAP_HOLE ||
3422 (cur_offset >= inode->i_size &&
3423 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
Qu Wenruo14524a82015-09-08 17:22:44 +08003424 ret = add_falloc_range(&reserve_list, cur_offset,
3425 last_byte - cur_offset);
3426 if (ret < 0) {
3427 free_extent_map(em);
3428 break;
Filipe Manana3d850dd2015-03-12 23:23:13 +00003429 }
Nikolay Borisov7661a3e2020-06-03 08:55:37 +03003430 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3431 &data_reserved, cur_offset,
3432 last_byte - cur_offset);
Filipe Mananabe2d2532017-04-03 15:57:17 +01003433 if (ret < 0) {
Robbie Ko39ad3172019-03-26 11:56:11 +08003434 cur_offset = last_byte;
Filipe Mananabe2d2532017-04-03 15:57:17 +01003435 free_extent_map(em);
Qu Wenruo14524a82015-09-08 17:22:44 +08003436 break;
Filipe Mananabe2d2532017-04-03 15:57:17 +01003437 }
Wang Xiaoguang18513092016-07-25 15:51:40 +08003438 } else {
3439 /*
 3440 * We do not need to reserve an unwritten extent for this
 3441 * range; free the reserved data space first, otherwise
 3442 * it'll result in a false ENOSPC error.
3443 */
Nikolay Borisov25ce28c2020-06-03 08:55:39 +03003444 btrfs_free_reserved_data_space(BTRFS_I(inode),
3445 data_reserved, cur_offset,
3446 last_byte - cur_offset);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003447 }
3448 free_extent_map(em);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003449 cur_offset = last_byte;
Qu Wenruo14524a82015-09-08 17:22:44 +08003450 }
3451
3452 /*
 3453 * If ret is still 0, it means we're OK to fallocate.
 3454 * Otherwise just clean up the list and exit.
3455 */
3456 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3457 if (!ret)
3458 ret = btrfs_prealloc_file_range(inode, mode,
3459 range->start,
Fabian Frederick93407472017-02-27 14:28:32 -08003460 range->len, i_blocksize(inode),
Qu Wenruo14524a82015-09-08 17:22:44 +08003461 offset + len, &alloc_hint);
Wang Xiaoguang18513092016-07-25 15:51:40 +08003462 else
Nikolay Borisov25ce28c2020-06-03 08:55:39 +03003463 btrfs_free_reserved_data_space(BTRFS_I(inode),
Qu Wenruobc42bda2017-02-27 15:10:39 +08003464 data_reserved, range->start,
3465 range->len);
Qu Wenruo14524a82015-09-08 17:22:44 +08003466 list_del(&range->list);
3467 kfree(range);
3468 }
3469 if (ret < 0)
3470 goto out_unlock;
3471
Filipe Mananaf27451f2017-10-25 11:55:28 +01003472 /*
3473 * We didn't need to allocate any more space, but we still extended the
3474 * size of the file so we need to update i_size and the inode item.
3475 */
3476 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
Qu Wenruo14524a82015-09-08 17:22:44 +08003477out_unlock:
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003478 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
David Sterbae43bbe52017-12-12 21:43:52 +01003479 &cached_state);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003480out:
Josef Bacik8d9b4a12021-02-10 17:14:36 -05003481 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
Chris Masond98456f2012-01-31 20:27:41 -05003482 /* Let go of our reservation. */
Filipe Mananaf27451f2017-10-25 11:55:28 +01003483 if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
Nikolay Borisov25ce28c2020-06-03 08:55:39 +03003484 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
Robbie Ko39ad3172019-03-26 11:56:11 +08003485 cur_offset, alloc_end - cur_offset);
Qu Wenruo364ecf32017-02-27 15:10:38 +08003486 extent_changeset_free(data_reserved);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003487 return ret;
3488}
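/*
 * Overall flow (sketch): btrfs_fallocate() waits for ordered IO, locks the
 * block-aligned range, walks the extent maps once to build reserve_list
 * (qgroup-reserving only the hole ranges that need new extents), then
 * preallocates each queued range - or releases its reservation if an
 * earlier step failed - and finally updates i_size.
 */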
3489
Nikolay Borisovcca5de92021-02-17 15:12:48 +02003490static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
Nikolay Borisovbc802302019-09-27 13:23:18 +03003491 int whence)
Josef Bacikb2675152011-07-18 13:21:36 -04003492{
Nikolay Borisovcca5de92021-02-17 15:12:48 +02003493 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Josef Bacik7f4ca372013-10-18 11:44:46 -04003494 struct extent_map *em = NULL;
Josef Bacikb2675152011-07-18 13:21:36 -04003495 struct extent_state *cached_state = NULL;
Nikolay Borisovcca5de92021-02-17 15:12:48 +02003496 loff_t i_size = inode->vfs_inode.i_size;
Liu Bo4d1a40c2014-09-16 17:49:30 +08003497 u64 lockstart;
3498 u64 lockend;
3499 u64 start;
3500 u64 len;
Josef Bacikb2675152011-07-18 13:21:36 -04003501 int ret = 0;
3502
Nikolay Borisovbc802302019-09-27 13:23:18 +03003503 if (i_size == 0 || offset >= i_size)
Josef Bacikb2675152011-07-18 13:21:36 -04003504 return -ENXIO;
3505
Liu Bo4d1a40c2014-09-16 17:49:30 +08003506 /*
Nikolay Borisovbc802302019-09-27 13:23:18 +03003507	 * offset can be negative; in this case we start finding DATA/HOLE from
Liu Bo4d1a40c2014-09-16 17:49:30 +08003508 * the very start of the file.
3509 */
Nikolay Borisovbc802302019-09-27 13:23:18 +03003510 start = max_t(loff_t, 0, offset);
Liu Bo4d1a40c2014-09-16 17:49:30 +08003511
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003512 lockstart = round_down(start, fs_info->sectorsize);
Nikolay Borisovd79b7c22019-09-27 13:23:16 +03003513 lockend = round_up(i_size, fs_info->sectorsize);
Liu Bo4d1a40c2014-09-16 17:49:30 +08003514 if (lockend <= lockstart)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003515 lockend = lockstart + fs_info->sectorsize;
Liu Bo4d1a40c2014-09-16 17:49:30 +08003516 lockend--;
3517 len = lockend - lockstart + 1;
3518
Nikolay Borisovcca5de92021-02-17 15:12:48 +02003519 lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
Josef Bacikb2675152011-07-18 13:21:36 -04003520
Nikolay Borisovd79b7c22019-09-27 13:23:16 +03003521 while (start < i_size) {
Nikolay Borisovcca5de92021-02-17 15:12:48 +02003522 em = btrfs_get_extent_fiemap(inode, start, len);
Josef Bacikb2675152011-07-18 13:21:36 -04003523 if (IS_ERR(em)) {
Jeff Liu6af021d2012-02-09 14:25:50 +08003524 ret = PTR_ERR(em);
Josef Bacik7f4ca372013-10-18 11:44:46 -04003525 em = NULL;
Josef Bacikb2675152011-07-18 13:21:36 -04003526 break;
3527 }
3528
Josef Bacik7f4ca372013-10-18 11:44:46 -04003529 if (whence == SEEK_HOLE &&
3530 (em->block_start == EXTENT_MAP_HOLE ||
3531 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3532 break;
3533 else if (whence == SEEK_DATA &&
3534 (em->block_start != EXTENT_MAP_HOLE &&
3535 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3536 break;
Josef Bacikb2675152011-07-18 13:21:36 -04003537
3538 start = em->start + em->len;
Josef Bacikb2675152011-07-18 13:21:36 -04003539 free_extent_map(em);
Josef Bacik7f4ca372013-10-18 11:44:46 -04003540 em = NULL;
Josef Bacikb2675152011-07-18 13:21:36 -04003541 cond_resched();
3542 }
Josef Bacik7f4ca372013-10-18 11:44:46 -04003543 free_extent_map(em);
Nikolay Borisovcca5de92021-02-17 15:12:48 +02003544 unlock_extent_cached(&inode->io_tree, lockstart, lockend,
David Sterbae43bbe52017-12-12 21:43:52 +01003545 &cached_state);
Nikolay Borisovbc802302019-09-27 13:23:18 +03003546 if (ret) {
3547 offset = ret;
3548 } else {
3549 if (whence == SEEK_DATA && start >= i_size)
3550 offset = -ENXIO;
3551 else
3552 offset = min_t(loff_t, start, i_size);
3553 }
3554
3555 return offset;
Josef Bacikb2675152011-07-18 13:21:36 -04003556}
3557
Andrew Morton965c8e52012-12-17 15:59:39 -08003558static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
Josef Bacikb2675152011-07-18 13:21:36 -04003559{
3560 struct inode *inode = file->f_mapping->host;
Josef Bacikb2675152011-07-18 13:21:36 -04003561
Andrew Morton965c8e52012-12-17 15:59:39 -08003562 switch (whence) {
Nikolay Borisov2034f3b2019-09-27 13:23:17 +03003563 default:
3564 return generic_file_llseek(file, offset, whence);
Josef Bacikb2675152011-07-18 13:21:36 -04003565 case SEEK_DATA:
3566 case SEEK_HOLE:
Goldwyn Rodriguesa14b78a2020-09-24 11:39:16 -05003567 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
Nikolay Borisovcca5de92021-02-17 15:12:48 +02003568 offset = find_desired_extent(BTRFS_I(inode), offset, whence);
Goldwyn Rodriguesa14b78a2020-09-24 11:39:16 -05003569 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
Nikolay Borisovbc802302019-09-27 13:23:18 +03003570 break;
Josef Bacikb2675152011-07-18 13:21:36 -04003571 }
3572
Nikolay Borisovbc802302019-09-27 13:23:18 +03003573 if (offset < 0)
3574 return offset;
3575
Nikolay Borisov2034f3b2019-09-27 13:23:17 +03003576 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
Josef Bacikb2675152011-07-18 13:21:36 -04003577}
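/*
 * Illustrative userspace sketch (not part of this file): walking a sparse
 * file's data and hole segments with the SEEK_DATA/SEEK_HOLE support that
 * find_desired_extent() implements above. Names are made up; lseek()
 * returns -1 with errno ENXIO once there is no more data.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

void example_map_extents(int fd)
{
	off_t end = lseek(fd, 0, SEEK_END);
	off_t pos = 0;

	while (pos < end) {
		off_t data = lseek(fd, pos, SEEK_DATA);
		off_t hole;

		if (data < 0)	/* ENXIO: no more data */
			break;
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		pos = hole;
	}
}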
3578
Goldwyn Rodriguesedf064e2017-06-20 07:05:49 -05003579static int btrfs_file_open(struct inode *inode, struct file *filp)
3580{
Jens Axboe8730f122020-05-22 10:19:22 -06003581 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
Goldwyn Rodriguesedf064e2017-06-20 07:05:49 -05003582 return generic_file_open(inode, filp);
3583}
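/*
 * Illustrative userspace sketch (not part of this file): the FMODE_NOWAIT
 * set in btrfs_file_open() is what lets pwritev2() honor RWF_NOWAIT,
 * failing with EAGAIN instead of blocking when the write would have to
 * wait (without FMODE_NOWAIT the kernel returns EOPNOTSUPP). The helper
 * name is made up.
 */
#define _GNU_SOURCE
#include <sys/uio.h>
#include <errno.h>

ssize_t example_nowait_write(int fd, void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	ssize_t ret = pwritev2(fd, &iov, 1, off, RWF_NOWAIT);

	if (ret < 0 && errno == EAGAIN) {
		/* Would block; retry as a normal, possibly blocking write. */
		ret = pwritev2(fd, &iov, 1, off, 0);
	}
	return ret;
}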
3584
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05003585static int check_direct_read(struct btrfs_fs_info *fs_info,
3586 const struct iov_iter *iter, loff_t offset)
3587{
3588 int ret;
3589 int i, seg;
3590
3591 ret = check_direct_IO(fs_info, iter, offset);
3592 if (ret < 0)
3593 return ret;
3594
3595 if (!iter_is_iovec(iter))
3596 return 0;
3597
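	/*
	 * Reject iovec segments that alias the same user address; a
	 * non-zero return here makes btrfs_direct_read() bail out, so
	 * such reads are served by the buffered path instead.
	 */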
3598 for (seg = 0; seg < iter->nr_segs; seg++)
3599 for (i = seg + 1; i < iter->nr_segs; i++)
3600 if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
3601 return -EINVAL;
3602 return 0;
3603}
3604
3605static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3606{
3607 struct inode *inode = file_inode(iocb->ki_filp);
3608 ssize_t ret;
3609
3610 if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3611 return 0;
3612
Goldwyn Rodriguesa14b78a2020-09-24 11:39:16 -05003613 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
Christoph Hellwig2f632962021-01-23 10:06:09 -08003614 ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 0);
Goldwyn Rodriguesa14b78a2020-09-24 11:39:16 -05003615 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05003616 return ret;
3617}
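/*
 * Illustrative userspace sketch (not part of this file): a direct read
 * that ends up in btrfs_direct_read() above. O_DIRECT requires the
 * buffer, offset and length to be block aligned; 4096 is assumed here.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

ssize_t example_direct_read(const char *path)
{
	void *buf;
	int fd = open(path, O_RDONLY | O_DIRECT);
	ssize_t ret;

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return -1;
	}
	ret = pread(fd, buf, 4096, 0);
	free(buf);
	close(fd);
	return ret;
}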
3618
Goldwyn Rodriguesf85781f2020-08-17 11:18:21 -05003619static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3620{
3621 ssize_t ret = 0;
3622
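	/*
	 * Try the direct path first; if it failed, consumed the whole
	 * iter, or hit EOF we are done. Otherwise fall through to
	 * filemap_read(), passing along the bytes already transferred,
	 * so the buffered path picks up where the direct read stopped.
	 */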
3623 if (iocb->ki_flags & IOCB_DIRECT) {
Goldwyn Rodrigues4e4cabe2020-09-24 11:39:12 -05003624 ret = btrfs_direct_read(iocb, to);
Johannes Thumshirn0425e7b2020-10-22 23:05:05 +09003625 if (ret < 0 || !iov_iter_count(to) ||
3626 iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
Goldwyn Rodriguesf85781f2020-08-17 11:18:21 -05003627 return ret;
3628 }
3629
Christoph Hellwig87fa0f32021-02-24 12:02:42 -08003630 return filemap_read(iocb, to, ret);
Goldwyn Rodriguesf85781f2020-08-17 11:18:21 -05003631}
3632
Alexey Dobriyan828c0952009-10-01 15:43:56 -07003633const struct file_operations btrfs_file_operations = {
Josef Bacikb2675152011-07-18 13:21:36 -04003634 .llseek = btrfs_file_llseek,
Goldwyn Rodriguesf85781f2020-08-17 11:18:21 -05003635 .read_iter = btrfs_file_read_iter,
Chris Masone9906a92007-12-14 12:56:58 -05003636 .splice_read = generic_file_splice_read,
Al Virob30ac0f2014-04-03 14:29:04 -04003637 .write_iter = btrfs_file_write_iter,
Christoph Hellwigd7776592020-07-09 18:22:06 +02003638 .splice_write = iter_file_splice_write,
Chris Mason9ebefb182007-06-15 13:50:00 -04003639 .mmap = btrfs_file_mmap,
Goldwyn Rodriguesedf064e2017-06-20 07:05:49 -05003640 .open = btrfs_file_open,
Mingminge1b81e62008-05-27 10:55:43 -04003641 .release = btrfs_release_file,
Chris Mason39279cc2007-06-12 06:35:45 -04003642 .fsync = btrfs_sync_file,
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01003643 .fallocate = btrfs_fallocate,
Christoph Hellwig34287aa2007-09-14 10:22:47 -04003644 .unlocked_ioctl = btrfs_ioctl,
Chris Mason39279cc2007-06-12 06:35:45 -04003645#ifdef CONFIG_COMPAT
Luke Dashjr4c63c242015-10-29 08:22:21 +00003646 .compat_ioctl = btrfs_compat_ioctl,
Chris Mason39279cc2007-06-12 06:35:45 -04003647#endif
Darrick J. Wong2e5dfc92018-10-30 10:41:21 +11003648 .remap_file_range = btrfs_remap_file_range,
Chris Mason39279cc2007-06-12 06:35:45 -04003649};
Miao Xie9247f312012-11-26 09:24:43 +00003650
David Sterbae67c7182018-02-19 17:24:18 +01003651void __cold btrfs_auto_defrag_exit(void)
Miao Xie9247f312012-11-26 09:24:43 +00003652{
Kinglong Mee5598e902016-01-29 21:36:35 +08003653 kmem_cache_destroy(btrfs_inode_defrag_cachep);
Miao Xie9247f312012-11-26 09:24:43 +00003654}
3655
Liu Bof5c29bd2017-11-02 17:21:50 -06003656int __init btrfs_auto_defrag_init(void)
Miao Xie9247f312012-11-26 09:24:43 +00003657{
3658 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3659 sizeof(struct inode_defrag), 0,
Nikolay Borisovfba4b692016-06-23 21:17:08 +03003660 SLAB_MEM_SPREAD,
Miao Xie9247f312012-11-26 09:24:43 +00003661 NULL);
3662 if (!btrfs_inode_defrag_cachep)
3663 return -ENOMEM;
3664
3665 return 0;
3666}
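/*
 * Minimal sketch of how the defrag code uses the cache created above:
 * records are carved out of btrfs_inode_defrag_cachep rather than plain
 * kmalloc() so the fixed-size inode_defrag structs pack tightly and can
 * be recycled. example_alloc_defrag() is a hypothetical helper, not a
 * function in this file.
 */
static struct inode_defrag *example_alloc_defrag(u64 root, u64 ino)
{
	struct inode_defrag *defrag;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return NULL;
	defrag->root = root;
	defrag->ino = ino;
	return defrag;
}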
Filipe Manana728404d2014-10-10 09:43:11 +01003667
3668int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3669{
3670 int ret;
3671
3672 /*
3673	 * With compression we will find and lock a dirty page, clear the
3674	 * first one as dirty, set up an async extent, and immediately return
3675	 * with the entire range locked but with nobody actually marked with
3676	 * writeback. So we can't just filemap_write_and_wait_range() and
3677	 * expect it to work, since it will just kick off a thread to do the
3678	 * actual work. Instead we need to call filemap_fdatawrite_range()
3679	 * _again_, since it will wait on the page lock, which won't be
3680	 * unlocked until after the pages have been marked as writeback, and
3681	 * so we're good to go from there. We have to do this, otherwise we'll
3682	 * miss the ordered extents and that results in badness. Please Josef,
3683	 * do not think you know better and pull this out at some point in the
3684	 * future, it is right and you are wrong.
3685 */
3686 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3687 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3688 &BTRFS_I(inode)->runtime_flags))
3689 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3690
3691 return ret;
3692}
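/*
 * Hedged usage sketch: a typical caller pairs btrfs_fdatawrite_range()
 * with filemap_fdatawait_range() to get write-and-wait semantics that
 * stay safe against the compression race described above. The helper
 * name is made up for illustration.
 */
static int example_write_and_wait(struct inode *inode, loff_t start,
				  loff_t end)
{
	int ret = btrfs_fdatawrite_range(inode, start, end);

	if (ret)
		return ret;
	return filemap_fdatawait_range(inode->i_mapping, start, end);
}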