// SPDX-License-Identifier: GPL-2.0-only
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"

#include <linux/uaccess.h>
#include <asm/unistd.h>

/*
 * Performs necessary checks before doing a clone.
 *
 * Can adjust the number of bytes to clone via the @req_count argument.
 * Returns the error code that the caller should return, or zero if the
 * clone should be allowed.
 */
static int generic_remap_checks(struct file *file_in, loff_t pos_in,
                                struct file *file_out, loff_t pos_out,
                                loff_t *req_count, unsigned int remap_flags)
{
        struct inode *inode_in = file_in->f_mapping->host;
        struct inode *inode_out = file_out->f_mapping->host;
        uint64_t count = *req_count;
        uint64_t bcount;
        loff_t size_in, size_out;
        loff_t bs = inode_out->i_sb->s_blocksize;
        int ret;

        /* The start of both ranges must be aligned to an fs block. */
        if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
                return -EINVAL;

        /* Ensure offsets don't wrap. */
        if (pos_in + count < pos_in || pos_out + count < pos_out)
                return -EINVAL;

        size_in = i_size_read(inode_in);
        size_out = i_size_read(inode_out);

        /* Dedupe requires both ranges to be within EOF. */
        if ((remap_flags & REMAP_FILE_DEDUP) &&
            (pos_in >= size_in || pos_in + count > size_in ||
             pos_out >= size_out || pos_out + count > size_out))
                return -EINVAL;

        /* Ensure the infile range is within the infile. */
        if (pos_in >= size_in)
                return -EINVAL;
        count = min(count, size_in - (uint64_t)pos_in);

        ret = generic_write_check_limits(file_out, pos_out, &count);
        if (ret)
                return ret;

        /*
         * If the user wanted us to link to the infile's EOF, round up to the
         * next block boundary for this check.
         *
         * Otherwise, make sure the count is also block-aligned, having
         * already confirmed the starting offsets' block alignment.
         */
        if (pos_in + count == size_in) {
                bcount = ALIGN(size_in, bs) - pos_in;
        } else {
                if (!IS_ALIGNED(count, bs))
                        count = ALIGN_DOWN(count, bs);
                bcount = count;
        }

        /* Don't allow overlapped cloning within the same file. */
        if (inode_in == inode_out &&
            pos_out + bcount > pos_in &&
            pos_out < pos_in + bcount)
                return -EINVAL;

        /*
         * We shortened the request but the caller can't deal with that, so
         * bounce the request back to userspace.
         */
        if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
                return -EINVAL;

        *req_count = count;
        return 0;
}

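/*
 * Worked example (illustrative numbers, not taken from any caller in this
 * file): with a 4096-byte block size, size_in = 10000, pos_in = 8192 and
 * count = 1808, the request ends exactly at the source EOF, so the overlap
 * check above uses bcount = ALIGN(10000, 4096) - 8192 = 4096.  A misaligned
 * request that stops short of EOF, e.g. count = 6000 from pos_in = 0, is
 * instead rounded down to 4096; the shortened length is only accepted if
 * the caller passed REMAP_FILE_CAN_SHORTEN, otherwise generic_remap_checks()
 * returns -EINVAL.
 */
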
static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
                             bool write)
{
        if (unlikely(pos < 0 || len < 0))
                return -EINVAL;

        if (unlikely((loff_t) (pos + len) < 0))
                return -EINVAL;

        return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
}

/*
 * Ensure that we don't remap a partial EOF block in the middle of something
 * else.  Assume that the offsets have already been checked for block
 * alignment.
 *
 * For clone we only link a partial EOF block above or at the destination
 * file's EOF.  For deduplication we accept a partial EOF block only if it
 * ends at the destination file's EOF (can not link it into the middle of a
 * file).
 *
 * Shorten the request if possible.
 */
static int generic_remap_check_len(struct inode *inode_in,
                                   struct inode *inode_out,
                                   loff_t pos_out,
                                   loff_t *len,
                                   unsigned int remap_flags)
{
        u64 blkmask = i_blocksize(inode_in) - 1;
        loff_t new_len = *len;

        if ((*len & blkmask) == 0)
                return 0;

        if (pos_out + *len < i_size_read(inode_out))
                new_len &= ~blkmask;

        if (new_len == *len)
                return 0;

        if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
                *len = new_len;
                return 0;
        }

        return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}

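/*
 * Worked example (assumed numbers): with 4096-byte blocks, a request of
 * *len = 6000 at pos_out = 4096 ends at 10096, below a destination EOF of
 * 20000, so new_len is rounded down to 4096.  With REMAP_FILE_CAN_SHORTEN
 * the request proceeds with the shorter length; without it a dedupe fails
 * with -EBADE and a clone with -EINVAL.
 */
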
/* Read a page's worth of file data into the page cache. */
static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
{
        struct page *page;

        page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
        if (IS_ERR(page))
                return page;
        if (!PageUptodate(page)) {
                put_page(page);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * Lock two pages, ensuring that we lock in offset order if the pages are from
 * the same file.
 */
static void vfs_lock_two_pages(struct page *page1, struct page *page2)
{
        /* Always lock in order of increasing index. */
        if (page1->index > page2->index)
                swap(page1, page2);

        lock_page(page1);
        if (page1 != page2)
                lock_page(page2);
}

/* Unlock two pages, being careful not to unlock the same page twice. */
static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
{
        unlock_page(page1);
        if (page1 != page2)
                unlock_page(page2);
}

/*
 * Compare extents of two files to see if they are the same.
 * Caller must have locked both inodes to prevent write races.
 */
static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
                                         struct inode *dest, loff_t destoff,
                                         loff_t len, bool *is_same)
{
        loff_t src_poff;
        loff_t dest_poff;
        void *src_addr;
        void *dest_addr;
        struct page *src_page;
        struct page *dest_page;
        loff_t cmp_len;
        bool same;
        int error;

        error = -EINVAL;
        same = true;
        while (len) {
                src_poff = srcoff & (PAGE_SIZE - 1);
                dest_poff = destoff & (PAGE_SIZE - 1);
                cmp_len = min(PAGE_SIZE - src_poff,
                              PAGE_SIZE - dest_poff);
                cmp_len = min(cmp_len, len);
                if (cmp_len <= 0)
                        goto out_error;

                src_page = vfs_dedupe_get_page(src, srcoff);
                if (IS_ERR(src_page)) {
                        error = PTR_ERR(src_page);
                        goto out_error;
                }
                dest_page = vfs_dedupe_get_page(dest, destoff);
                if (IS_ERR(dest_page)) {
                        error = PTR_ERR(dest_page);
                        put_page(src_page);
                        goto out_error;
                }

                vfs_lock_two_pages(src_page, dest_page);

                /*
                 * Now that we've locked both pages, make sure they're still
                 * mapped to the file data we're interested in.  If not,
                 * someone is invalidating pages on us and we lose.
                 */
                if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
                    src_page->mapping != src->i_mapping ||
                    dest_page->mapping != dest->i_mapping) {
                        same = false;
                        goto unlock;
                }

                src_addr = kmap_atomic(src_page);
                dest_addr = kmap_atomic(dest_page);

                flush_dcache_page(src_page);
                flush_dcache_page(dest_page);

                if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
                        same = false;

                kunmap_atomic(dest_addr);
                kunmap_atomic(src_addr);
unlock:
                vfs_unlock_two_pages(src_page, dest_page);
                put_page(dest_page);
                put_page(src_page);

                if (!same)
                        break;

                srcoff += cmp_len;
                destoff += cmp_len;
                len -= cmp_len;
        }

        *is_same = same;
        return 0;

out_error:
        return error;
}

/*
 * Check that the two inodes are eligible for cloning, the ranges make
 * sense, and then flush all dirty data.  Caller must ensure that the
 * inodes have been locked against any other modifications.
 *
 * If there's an error, then the usual negative error code is returned.
 * Otherwise returns 0 with *len set to the request length.
 */
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
                                  struct file *file_out, loff_t pos_out,
                                  loff_t *len, unsigned int remap_flags)
{
        struct inode *inode_in = file_inode(file_in);
        struct inode *inode_out = file_inode(file_out);
        bool same_inode = (inode_in == inode_out);
        int ret;

        /* Don't touch certain kinds of inodes */
        if (IS_IMMUTABLE(inode_out))
                return -EPERM;

        if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
                return -ETXTBSY;

        /* Don't reflink dirs, pipes, sockets... */
        if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
                return -EISDIR;
        if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
                return -EINVAL;

        /* Zero length dedupe exits immediately; reflink goes to EOF. */
        if (*len == 0) {
                loff_t isize = i_size_read(inode_in);

                if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
                        return 0;
                if (pos_in > isize)
                        return -EINVAL;
                *len = isize - pos_in;
                if (*len == 0)
                        return 0;
        }

        /* Check that we don't violate system file offset limits. */
        ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
                                   remap_flags);
        if (ret)
                return ret;

        /* Wait for the completion of any pending IOs on both files */
        inode_dio_wait(inode_in);
        if (!same_inode)
                inode_dio_wait(inode_out);

        ret = filemap_write_and_wait_range(inode_in->i_mapping,
                                           pos_in, pos_in + *len - 1);
        if (ret)
                return ret;

        ret = filemap_write_and_wait_range(inode_out->i_mapping,
                                           pos_out, pos_out + *len - 1);
        if (ret)
                return ret;

        /*
         * Check that the extents are the same.
         */
        if (remap_flags & REMAP_FILE_DEDUP) {
                bool is_same = false;

                ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
                                inode_out, pos_out, *len, &is_same);
                if (ret)
                        return ret;
                if (!is_same)
                        return -EBADE;
        }

        ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
                                      remap_flags);
        if (ret)
                return ret;

        /*
         * Dedupe doesn't change the file contents; only a clone needs the
         * timestamp update and privilege stripping on the destination.
         */
        if (!(remap_flags & REMAP_FILE_DEDUP))
                ret = file_modified(file_out);

        return ret;
}
EXPORT_SYMBOL(generic_remap_file_range_prep);

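/*
 * A minimal sketch (illustrative, not part of this file) of how a
 * filesystem might wire its ->remap_file_range method to the prep helper
 * above.  "myfs_remap_file_range" and "myfs_reflink_extents" are
 * hypothetical names; real implementations also take the inode locks and
 * freeze protection around this sequence.
 *
 *	static loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
 *					    struct file *file_out, loff_t pos_out,
 *					    loff_t len, unsigned int remap_flags)
 *	{
 *		loff_t ret;
 *
 *		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
 *			return -EINVAL;
 *
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *						    pos_out, &len, remap_flags);
 *		if (ret < 0 || len == 0)
 *			return ret;
 *
 *		ret = myfs_reflink_extents(file_in, pos_in, file_out, pos_out, len);
 *		return ret < 0 ? ret : len;
 *	}
 */
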
loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
                           struct file *file_out, loff_t pos_out,
                           loff_t len, unsigned int remap_flags)
{
        loff_t ret;

        WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);

        /*
         * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
         * the same mount.  Practically, they only need to be on the same file
         * system.
         */
        if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
                return -EXDEV;

        ret = generic_file_rw_checks(file_in, file_out);
        if (ret < 0)
                return ret;

        if (!file_in->f_op->remap_file_range)
                return -EOPNOTSUPP;

        ret = remap_verify_area(file_in, pos_in, len, false);
        if (ret)
                return ret;

        ret = remap_verify_area(file_out, pos_out, len, true);
        if (ret)
                return ret;

        ret = file_in->f_op->remap_file_range(file_in, pos_in,
                        file_out, pos_out, len, remap_flags);
        if (ret < 0)
                return ret;

        fsnotify_access(file_in);
        fsnotify_modify(file_out);
        return ret;
}
EXPORT_SYMBOL(do_clone_file_range);

loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
                            struct file *file_out, loff_t pos_out,
                            loff_t len, unsigned int remap_flags)
{
        loff_t ret;

        file_start_write(file_out);
        ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
                                  remap_flags);
        file_end_write(file_out);

        return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);

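/*
 * Callers such as the FICLONE/FICLONERANGE ioctl handlers resolve the
 * source descriptor and then invoke vfs_clone_file_range() with the
 * destination file already open for writing.  A rough sketch (error
 * handling trimmed; not the literal fs/ioctl.c code):
 *
 *	struct fd src = fdget(srcfd);
 *	loff_t cloned;
 *
 *	if (!src.file)
 *		return -EBADF;
 *	cloned = vfs_clone_file_range(src.file, src_off, dst_file, dst_off,
 *				      len, 0);
 *	fdput(src);
 *	if (cloned < 0)
 *		return cloned;
 *	return (len && cloned != len) ? -EINVAL : 0;
 */
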
/* Check whether we are allowed to dedupe the destination file */
static bool allow_file_dedupe(struct file *file)
{
        struct user_namespace *mnt_userns = file_mnt_user_ns(file);
        struct inode *inode = file_inode(file);

        if (capable(CAP_SYS_ADMIN))
                return true;
        if (file->f_mode & FMODE_WRITE)
                return true;
        if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
                return true;
        if (!inode_permission(mnt_userns, inode, MAY_WRITE))
                return true;
        return false;
}

loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
                                 struct file *dst_file, loff_t dst_pos,
                                 loff_t len, unsigned int remap_flags)
{
        loff_t ret;

        WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
                                     REMAP_FILE_CAN_SHORTEN));

        ret = mnt_want_write_file(dst_file);
        if (ret)
                return ret;

        /*
         * This is redundant if called from vfs_dedupe_file_range(), but other
         * callers need it and it's not performance sensitive...
         */
        ret = remap_verify_area(src_file, src_pos, len, false);
        if (ret)
                goto out_drop_write;

        ret = remap_verify_area(dst_file, dst_pos, len, true);
        if (ret)
                goto out_drop_write;

        ret = -EPERM;
        if (!allow_file_dedupe(dst_file))
                goto out_drop_write;

        ret = -EXDEV;
        if (src_file->f_path.mnt != dst_file->f_path.mnt)
                goto out_drop_write;

        ret = -EISDIR;
        if (S_ISDIR(file_inode(dst_file)->i_mode))
                goto out_drop_write;

        ret = -EINVAL;
        if (!dst_file->f_op->remap_file_range)
                goto out_drop_write;

        if (len == 0) {
                ret = 0;
                goto out_drop_write;
        }

        ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
                        dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
out_drop_write:
        mnt_drop_write_file(dst_file);

        return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range_one);

int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
{
        struct file_dedupe_range_info *info;
        struct inode *src = file_inode(file);
        u64 off;
        u64 len;
        int i;
        int ret;
        u16 count = same->dest_count;
        loff_t deduped;

        if (!(file->f_mode & FMODE_READ))
                return -EINVAL;

        if (same->reserved1 || same->reserved2)
                return -EINVAL;

        off = same->src_offset;
        len = same->src_length;

        if (S_ISDIR(src->i_mode))
                return -EISDIR;

        if (!S_ISREG(src->i_mode))
                return -EINVAL;

        if (!file->f_op->remap_file_range)
                return -EOPNOTSUPP;

        ret = remap_verify_area(file, off, len, false);
        if (ret < 0)
                return ret;
        ret = 0;

        if (off + len > i_size_read(src))
                return -EINVAL;

        /* Arbitrary 1G limit on a single dedupe request, can be raised. */
        len = min_t(u64, len, 1 << 30);

        /* pre-format output fields to sane values */
        for (i = 0; i < count; i++) {
                same->info[i].bytes_deduped = 0ULL;
                same->info[i].status = FILE_DEDUPE_RANGE_SAME;
        }

        for (i = 0, info = same->info; i < count; i++, info++) {
                struct fd dst_fd = fdget(info->dest_fd);
                struct file *dst_file = dst_fd.file;

                if (!dst_file) {
                        info->status = -EBADF;
                        goto next_loop;
                }

                if (info->reserved) {
                        info->status = -EINVAL;
                        goto next_fdput;
                }

                deduped = vfs_dedupe_file_range_one(file, off, dst_file,
                                                    info->dest_offset, len,
                                                    REMAP_FILE_CAN_SHORTEN);
                if (deduped == -EBADE)
                        info->status = FILE_DEDUPE_RANGE_DIFFERS;
                else if (deduped < 0)
                        info->status = deduped;
                else
                        info->bytes_deduped = len;

next_fdput:
                fdput(dst_fd);
next_loop:
                if (fatal_signal_pending(current))
                        break;
        }
        return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range);
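
/*
 * For reference, userspace reaches vfs_dedupe_file_range() through the
 * FIDEDUPERANGE ioctl on the source file descriptor.  A minimal userspace
 * sketch (error handling omitted; src_fd and dst_fd are assumed to be open
 * descriptors):
 *
 *	struct file_dedupe_range *range;
 *
 *	range = calloc(1, sizeof(*range) +
 *			  sizeof(struct file_dedupe_range_info));
 *	range->src_offset = 0;
 *	range->src_length = 65536;
 *	range->dest_count = 1;
 *	range->info[0].dest_fd = dst_fd;
 *	range->info[0].dest_offset = 0;
 *	ioctl(src_fd, FIDEDUPERANGE, range);
 *	if (range->info[0].status == FILE_DEDUPE_RANGE_SAME)
 *		printf("deduped %llu bytes\n",
 *		       (unsigned long long)range->info[0].bytes_deduped);
 */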