// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

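/*
 * Update the destination inode after cloning a range into it: bump the inode
 * version, update mtime/ctime unless the caller asked not to, extend i_size
 * when the cloned range goes beyond the current end of file, write out the
 * inode item and end the transaction.
 */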
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(inode, 0);
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}

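/*
 * Copy the data of an inline extent into a page of the destination inode,
 * decompressing it first if needed. The page is marked dirty and delalloc is
 * set on the block so the data is written back through the normal path, and
 * any part of the block not covered by the inline data is zeroed out.
 */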
static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	const u64 block_size = btrfs_inode_sectorsize(inode);
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	set_page_extent_mapped(page);
	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

	if (comp_type == BTRFS_COMPRESS_NONE) {
		char *map;

		map = kmap(page);
		memcpy(map, data_start, datal);
		flush_dcache_page(page);
		kunmap(page);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page, 0,
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" file
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size) {
		char *map;

		map = kmap(page);
		memset(map + datal, 0, block_size - datal);
		flush_dcache_page(page);
		kunmap(page);
	}

	SetPageUptodate(page);
	ClearPageChecked(page);
	set_page_dirty(page);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to destination inode when possible. When not possible we
 * copy the inline extent's data into the respective page of the inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with in the same
	 * way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	inode_add_bytes(dst, datal);
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to write
	 * to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}

/**
 * btrfs_clone() - clone a range from one file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to skip updating mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

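	/*
	 * Walk the file extent items of the source inode that overlap the
	 * range [off, off + len), cloning each one into the destination inode.
	 * Each iteration starts a transaction (inside
	 * btrfs_replace_file_extents() or clone_copy_inline_extent()), which
	 * is ended by clone_finish_inode_update() after updating the
	 * destination inode.
	 */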
	while (1) {
		u64 next_key_min_offset = key.offset + 1;
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		path->leave_spinning = 1;
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 */
		if (key.offset + datal <= off) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}
		next_key_min_offset = key.offset + datal;
		size = btrfs_item_size_nr(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);
		path->leave_spinning = 0;

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item
		 * representing it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully or partially overlapping it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			ret = btrfs_replace_file_extents(inode, path, drop_start,
					new_key.offset + datal - 1, &clone_info,
					&trans);
			if (ret)
				goto out;
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (key.offset != 0 || datal > fs_info->sectorsize)
				return -EUCLEAN;

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * If this is a new extent update the last_reflink_trans of both
		 * inodes. This is used by fsync to make sure it does not log
		 * multiple checksum items with overlapping ranges. For older
		 * extents we don't need to do it since inode logging skips the
		 * checksums for older extents. Also ignore holes and inline
		 * extents because they don't have checksums in the csum tree.
		 */
		if (extent_gen == trans->transid && disko > 0) {
			BTRFS_I(src)->last_reflink_trans = trans->transid;
			BTRFS_I(inode)->last_reflink_trans = trans->transid;
		}

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);
		path->leave_spinning = 0;

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);

		ret = btrfs_replace_file_extents(inode, path, last_dest_end,
				destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

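/*
 * Unlock the io_tree ranges of both inodes that were locked by
 * btrfs_double_extent_lock().
 */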
static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

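/*
 * Lock the io_tree ranges of both inodes in a fixed order, based on the inode
 * pointers (or on the offsets when both ranges belong to the same inode), so
 * that concurrent reflink operations cannot deadlock against each other.
 */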
static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
	}
	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

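/*
 * Deduplicate a single chunk of at most BTRFS_MAX_DEDUPE_LEN bytes by locking
 * both file ranges and cloning the source range into the destination, without
 * updating the destination's timestamps.
 */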
static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	return ret;
}

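/*
 * Deduplicate a range by splitting it into chunks of BTRFS_MAX_DEDUPE_LEN
 * bytes. Bail out with -EAGAIN if the destination root is currently used by a
 * send operation, and keep dedupe_in_progress elevated for the whole operation
 * so that send does not start against the root in the meanwhile.
 */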
static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret = 0;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}

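/*
 * Clone (reflink) a range from the source file into the destination file.
 * If the destination offset is beyond EOF, expand the destination inode first,
 * then lock both extent ranges, do the clone and drop the affected pages from
 * the page cache so that subsequent reads see the cloned data.
 */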
static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(inode, inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	return ret;
}

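/*
 * Btrfs specific checks and writeback needed before a clone or dedupe
 * operation, on top of what generic_remap_file_range_prep() does: check that
 * the destination root is writable and both files live on the same mount and
 * filesystem (for clone), that checksum flags are compatible, then wait for
 * in-flight direct IO and flush delalloc so the file extent items to be
 * cloned are in the fs tree.
 */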
static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	bool same_inode = inode_out == inode_in;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		if (file_in->f_path.mnt != file_out->f_path.mnt ||
		    inode_in->i_sb != inode_out->i_sb)
			return -EXDEV;
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
	 * any in progress could create its ordered extents after we wait for
	 * existing ordered extents below).
	 */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * NOCOW buffered writes without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, and thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * nocow writes should reach disk as nocow before we increase the
	 * reference of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the nocow part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

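/*
 * Btrfs' implementation of the remap_file_range file operation: lock the
 * inodes, run the prep checks above and dispatch to either dedupe or clone
 * depending on remap_flags, returning the remapped length on success.
 */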
loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff, loff_t len,
			      unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode)
		inode_lock(src_inode);
	else
		lock_two_nondirectories(src_inode, dst_inode);

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode)
		inode_unlock(src_inode);
	else
		unlock_two_nondirectories(src_inode, dst_inode);

	return ret < 0 ? ret : len;
}