// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"

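/*
 * Deduplication requests are processed in chunks of at most this size; see
 * btrfs_extent_same() below.
 */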
#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

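/*
 * Update the size, times and version of the destination inode after cloning
 * a range into it, then end the current transaction handle.
 */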
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}

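/*
 * Copy the data of an inline extent into the page cache of the destination
 * inode, at the block containing @file_offset, so that writeback later turns
 * it into a regular (non-inline) extent.
 */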
static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	const u64 block_size = btrfs_inode_sectorsize(inode);
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, and we have also locked the inodes themselves, so we are
	 * safe to do a reservation here. Also we must not do the reservation
	 * while holding a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	set_page_extent_mapped(page);
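	/*
	 * Clear any stale delalloc/accounting bits left on this block's range
	 * before flagging it as delalloc for the data we are about to copy in.
	 */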
	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

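	/*
	 * Uncompressed inline data can be copied into the page directly;
	 * compressed data must be decompressed into it first.
	 */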
	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, 0, data_start, datal);
		flush_dcache_page(page);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page, 0,
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K"
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size) {
		char *map;

		map = kmap(page);
		memset(map + datal, 0, block_size - datal);
		flush_dcache_page(page);
		kunmap(page);
	}

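	/*
	 * Mark the page fully up to date and dirty, so that writeback will
	 * later write the copied data out as a regular extent.
	 */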
	SetPageUptodate(page);
	ClearPageChecked(page);
	set_page_dirty(page);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible,
 * we copy the inline extent's data into the respective page of the
 * destination inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

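	/* Find out whether the destination inode has an extent at offset 0. */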
	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
						  inline_data, size, datal,
						  comp_type);
			goto out;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent, replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

copy_inline_extent:
	ret = 0;
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with in the same
	 * way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline
		 * extent data into the respective page at the destination
		 * inode.
		 */
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;
}

/**
 * btrfs_clone() - clone a range from one file's inode to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to skip updating mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

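	/* We walk the source tree in key order, so read ahead forward. */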
346 /* Clone data */
347 key.objectid = btrfs_ino(BTRFS_I(src));
348 key.type = BTRFS_EXTENT_DATA_KEY;
349 key.offset = off;
350
351 while (1) {
352 u64 next_key_min_offset = key.offset + 1;
353 struct btrfs_file_extent_item *extent;
Filipe Manana3ebac172020-07-15 12:30:43 +0100354 u64 extent_gen;
Filipe Manana6a177382020-02-28 13:04:17 +0000355 int type;
356 u32 size;
357 struct btrfs_key new_key;
358 u64 disko = 0, diskl = 0;
359 u64 datao = 0, datal = 0;
Filipe Manana05a5a762020-02-28 13:04:19 +0000360 u8 comp;
Filipe Manana6a177382020-02-28 13:04:17 +0000361 u64 drop_start;
362
363 /* Note the key will change type as we walk through the tree */
Filipe Manana6a177382020-02-28 13:04:17 +0000364 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
365 0, 0);
366 if (ret < 0)
367 goto out;
368 /*
369 * First search, if no extent item that starts at offset off was
370 * found but the previous item is an extent item, it's possible
371 * it might overlap our target range, therefore process it.
372 */
373 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
374 btrfs_item_key_to_cpu(path->nodes[0], &key,
375 path->slots[0] - 1);
376 if (key.type == BTRFS_EXTENT_DATA_KEY)
377 path->slots[0]--;
378 }
379
		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if
		 * we have holes and the NO_HOLES feature enabled.
		 */
		if (key.offset + datal <= off) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}
		next_key_min_offset = key.offset + datal;
		size = btrfs_item_size_nr(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

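		/*
		 * Build the key for the destination inode: same item type,
		 * the destination ino, and the offset translated into the
		 * destination range.
		 */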
		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item
		 * representing it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully or partially overlapping it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 * a | --- range to clone ---| b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			ret = btrfs_replace_file_extents(inode, path, drop_start,
					new_key.offset + datal - 1, &clone_info,
					&trans);
			if (ret)
				goto out;
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (key.offset != 0 || datal > fs_info->sectorsize) {
				/* Go through "out" so we free path and buf. */
				ret = -EUCLEAN;
				goto out;
			}

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * If this is a new extent, update the last_reflink_trans of
		 * both inodes. This is used by fsync to make sure it does not
		 * log multiple checksum items with overlapping ranges. For
		 * older extents we don't need to do it since inode logging
		 * skips the checksums for older extents. Also ignore holes
		 * and inline extents because they don't have checksums in the
		 * csum tree.
		 */
		if (extent_gen == trans->transid && disko > 0) {
			BTRFS_I(src)->last_reflink_trans = trans->transid;
			BTRFS_I(inode)->last_reflink_trans = trans->transid;
		}

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		ret = btrfs_replace_file_extents(inode, path, last_dest_end,
				destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

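/*
 * Lock the extent ranges of both inodes in a consistent order, based on the
 * inode addresses (and on the offsets when both ranges belong to the same
 * inode), so that two concurrent reflink operations cannot deadlock on each
 * other's locks.
 */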
static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
	}
	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	return ret;
}

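/*
 * Deduplicate the whole user-requested range. From userspace this path is
 * reached through the FIDEDUPERANGE ioctl; an illustrative (hypothetical)
 * invocation could look like:
 *
 *	struct file_dedupe_range *fdr;
 *
 *	fdr = calloc(1, sizeof(*fdr) + sizeof(struct file_dedupe_range_info));
 *	fdr->src_offset = 0;
 *	fdr->src_length = length;
 *	fdr->dest_count = 1;
 *	fdr->info[0].dest_fd = dst_fd;
 *	fdr->info[0].dest_offset = 0;
 *	ioctl(src_fd, FIDEDUPERANGE, fdr);
 *
 * The VFS compares the two ranges byte by byte and only then calls
 * btrfs_remap_file_range() with REMAP_FILE_DEDUP set, which ends up here.
 */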
static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

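	/* Process the range in chunks of at most BTRFS_MAX_DEDUPE_LEN each. */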
	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	return ret;
}

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	bool same_inode = inode_out == inode_in;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		if (file_in->f_path.mnt != file_out->f_path.mnt ||
		    inode_in->i_sb != inode_out->i_sb)
			return -EXDEV;
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and cannot rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
	 * any in progress could create its ordered extents after we wait for
	 * existing ordered extents below).
	 */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * A NOCOW buffered write without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, and thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes reach disk as NOCOW before we increase the reference
	 * count of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

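/*
 * Entry point for the VFS ->remap_file_range() operation. It is reached from
 * userspace through the FICLONE and FICLONERANGE ioctls, through
 * FIDEDUPERANGE, and through copy_file_range() when the kernel first tries to
 * reflink instead of copying. An illustrative (hypothetical) FICLONERANGE
 * call cloning the first 1 MiB of src_fd to offset 0 of dst_fd:
 *
 *	struct file_clone_range fcr = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 1024 * 1024,
 *		.dest_offset = 0,
 *	};
 *
 *	ioctl(dst_fd, FICLONERANGE, &fcr);
 */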
loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff, loff_t len,
			      unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode)
		inode_lock(src_inode);
	else
		lock_two_nondirectories(src_inode, dst_inode);

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode)
		inode_unlock(src_inode);
	else
		unlock_two_nondirectories(src_inode, dst_inode);

	return ret < 0 ? ret : len;
}