// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page, and, most importantly, it avoids the need for filesystem
 * specific locking per page.  Instead, all the operations are amortised over
 * the entire range of pages.  It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	loff_t written = 0, ret;
	u64 end;

	trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);

	/*
	 * Need to map a range from the start position for length bytes.  This
	 * can span multiple pages - it is only guaranteed to return a range
	 * of a single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten).  Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on.  Once we copy
	 * the data into the page cache pages, we cannot fail, otherwise we
	 * expose transient stale data.  If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
	if (ret)
		return ret;
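	/*
	 * Sanity check the mapping the filesystem returned: it must cover
	 * the requested position and must not be empty.
	 */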
	if (WARN_ON(iomap.offset > pos)) {
		written = -EIO;
		goto out;
	}
	if (WARN_ON(iomap.length == 0)) {
		written = -EIO;
		goto out;
	}

	trace_iomap_apply_dstmap(inode, &iomap);
	if (srcmap.type != IOMAP_HOLE)
		trace_iomap_apply_srcmap(inode, &srcmap);

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
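	/*
	 * For example (illustrative numbers only): if the caller asked to
	 * operate on [60k, 80k) but the filesystem mapped [0, 64k), end is
	 * 64k and length is trimmed from 20k down to 4k.
	 */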
	end = iomap.offset + iomap.length;
	if (srcmap.type != IOMAP_HOLE)
		end = min(end, srcmap.offset + srcmap.length);
	if (pos + length > end)
		length = end - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 *
	 * To support COW operations, we read in data for partial blocks from
	 * the srcmap if the file system filled it in.  In that case the
	 * length needs to be limited to the earlier of the ends of the two
	 * iomaps.  If the file system did not provide a srcmap we pass in the
	 * normal iomap into the actors so that they don't need to have
	 * special handling for the two cases.
	 */
	written = actor(inode, pos, length, data, &iomap,
			srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);

out:
	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
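
/*
 * Usage sketch (not part of the original file): a minimal, hypothetical
 * example of how a consumer drives iomap_apply().  The names dummy_actor
 * and dummy_rw are invented for illustration; the iomap_actor_t and
 * iomap_ops signatures match the call sites above.  The actor is invoked
 * once per mapping returned by ->iomap_begin() and reports how many bytes
 * it processed, so callers typically loop until the whole range is done.
 */
#if 0	/* example only, not compiled */
static loff_t
dummy_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	/* Operate on up to @length bytes of this single mapping. */
	return length;		/* bytes consumed, or a negative errno */
}

static loff_t
dummy_rw(struct inode *inode, loff_t pos, loff_t length,
		const struct iomap_ops *ops)
{
	loff_t done = 0, ret;

	/* Each iomap_apply() call handles at most one extent. */
	while (length > 0) {
		ret = iomap_apply(inode, pos, length, IOMAP_WRITE, ops,
				NULL, dummy_actor);
		if (ret <= 0)
			return done ? done : ret;
		pos += ret;
		length -= ret;
		done += ret;
	}
	return done;
}
#endif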