Christoph Hellwig | 73ce6ab | 2019-04-28 08:34:02 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2010 Red Hat, Inc. |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 4 | * Copyright (c) 2016-2018 Christoph Hellwig. |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 5 | */ |
| 6 | #include <linux/module.h> |
| 7 | #include <linux/compiler.h> |
| 8 | #include <linux/fs.h> |
| 9 | #include <linux/iomap.h> |
| 10 | #include <linux/uaccess.h> |
| 11 | #include <linux/gfp.h> |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 12 | #include <linux/migrate.h> |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 13 | #include <linux/mm.h> |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 14 | #include <linux/mm_inline.h> |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 15 | #include <linux/swap.h> |
| 16 | #include <linux/pagemap.h> |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 17 | #include <linux/pagevec.h> |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 18 | #include <linux/file.h> |
| 19 | #include <linux/uio.h> |
| 20 | #include <linux/backing-dev.h> |
| 21 | #include <linux/buffer_head.h> |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 22 | #include <linux/task_io_accounting_ops.h> |
Christoph Hellwig | 9a286f0 | 2016-06-21 09:31:39 +1000 | [diff] [blame] | 23 | #include <linux/dax.h> |
Ingo Molnar | f361bf4 | 2017-02-03 23:47:37 +0100 | [diff] [blame] | 24 | #include <linux/sched/signal.h> |
| 25 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 26 | #include "internal.h" |
| 27 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 28 | /* |
 |  29 |  * Execute an iomap write on a segment of the mapping that spans a |
| 30 | * contiguous range of pages that have identical block mapping state. |
| 31 | * |
| 32 | * This avoids the need to map pages individually, do individual allocations |
 |  33 |  * for each page and, most importantly, avoids the need for filesystem-specific |
| 34 | * locking per page. Instead, all the operations are amortised over the entire |
| 35 | * range of pages. It is assumed that the filesystems will lock whatever |
| 36 | * resources they require in the iomap_begin call, and release them in the |
| 37 | * iomap_end call. |
| 38 | */ |
Christoph Hellwig | befb503 | 2016-09-19 11:24:49 +1000 | [diff] [blame] | 39 | loff_t |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 40 | iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 41 | const struct iomap_ops *ops, void *data, iomap_actor_t actor) |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 42 | { |
| 43 | struct iomap iomap = { 0 }; |
| 44 | loff_t written = 0, ret; |
| 45 | |
| 46 | /* |
| 47 | * Need to map a range from start position for length bytes. This can |
| 48 | * span multiple pages - it is only guaranteed to return a range of a |
 |  49 |  * single type of pages (e.g. all in a hole, all mapped or all |
| 50 | * unwritten). Failure at this point has nothing to undo. |
| 51 | * |
| 52 | * If allocation is required for this range, reserve the space now so |
| 53 | * that the allocation is guaranteed to succeed later on. Once we copy |
 |  54 |  * the data into the page cache pages, we cannot fail, otherwise we would |
| 55 | * expose transient stale data. If the reserve fails, we can safely |
| 56 | * back out at this point as there is nothing to undo. |
| 57 | */ |
| 58 | ret = ops->iomap_begin(inode, pos, length, flags, &iomap); |
| 59 | if (ret) |
| 60 | return ret; |
| 61 | if (WARN_ON(iomap.offset > pos)) |
| 62 | return -EIO; |
Darrick J. Wong | 0c6dda7 | 2018-01-26 11:11:20 -0800 | [diff] [blame] | 63 | if (WARN_ON(iomap.length == 0)) |
| 64 | return -EIO; |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 65 | |
| 66 | /* |
| 67 | * Cut down the length to the one actually provided by the filesystem, |
| 68 | * as it might not be able to give us the whole size that we requested. |
| 69 | */ |
| 70 | if (iomap.offset + iomap.length < pos + length) |
| 71 | length = iomap.offset + iomap.length - pos; |
| 72 | |
| 73 | /* |
 |  74 |  * Now that we have guaranteed that the space allocation will succeed, |
| 75 | * we can do the copy-in page by page without having to worry about |
| 76 | * failures exposing transient data. |
| 77 | */ |
| 78 | written = actor(inode, pos, length, data, &iomap); |
| 79 | |
| 80 | /* |
 |  81 |  * Now that the data has been copied, commit the range we've copied. This |
| 82 | * should not fail unless the filesystem has had a fatal error. |
| 83 | */ |
Christoph Hellwig | f20ac7a | 2016-08-17 08:42:34 +1000 | [diff] [blame] | 84 | if (ops->iomap_end) { |
| 85 | ret = ops->iomap_end(inode, pos, length, |
| 86 | written > 0 ? written : 0, |
| 87 | flags, &iomap); |
| 88 | } |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 89 | |
| 90 | return written ? written : ret; |
| 91 | } |
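/*
 * Illustrative sketch (not part of the original file): callers typically
 * drive iomap_apply() in a loop, advancing by however much of the request
 * the actor managed to cover on each call, e.g.:
 *
 *	while (length > 0) {
 *		ret = iomap_apply(inode, pos, length, flags, ops, data, actor);
 *		if (ret <= 0)
 *			break;
 *		pos += ret;
 *		length -= ret;
 *	}
 *
 * iomap_zero_range() and iomap_fiemap() below follow exactly this pattern.
 */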
| 92 | |
Christoph Hellwig | 57fc505 | 2018-06-01 09:03:08 -0700 | [diff] [blame] | 93 | static sector_t |
| 94 | iomap_sector(struct iomap *iomap, loff_t pos) |
| 95 | { |
| 96 | return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; |
| 97 | } |
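/*
 * Illustrative example (not part of the original file): iomap->addr is the
 * disk address in bytes of the start of the mapping, so for a mapping with
 * offset == 4096 and addr == 8192, file position 5120 translates to disk
 * byte 9216, i.e. 512-byte sector 18.
 */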
| 98 | |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 99 | static struct iomap_page * |
| 100 | iomap_page_create(struct inode *inode, struct page *page) |
| 101 | { |
| 102 | struct iomap_page *iop = to_iomap_page(page); |
| 103 | |
| 104 | if (iop || i_blocksize(inode) == PAGE_SIZE) |
| 105 | return iop; |
| 106 | |
| 107 | iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); |
| 108 | atomic_set(&iop->read_count, 0); |
| 109 | atomic_set(&iop->write_count, 0); |
| 110 | bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); |
Piotr Jaroszynski | 8e47a45 | 2019-01-27 08:46:45 -0800 | [diff] [blame] | 111 | |
| 112 | /* |
| 113 | * migrate_page_move_mapping() assumes that pages with private data have |
| 114 | * their count elevated by 1. |
| 115 | */ |
| 116 | get_page(page); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 117 | set_page_private(page, (unsigned long)iop); |
| 118 | SetPagePrivate(page); |
| 119 | return iop; |
| 120 | } |
| 121 | |
| 122 | static void |
| 123 | iomap_page_release(struct page *page) |
| 124 | { |
| 125 | struct iomap_page *iop = to_iomap_page(page); |
| 126 | |
| 127 | if (!iop) |
| 128 | return; |
| 129 | WARN_ON_ONCE(atomic_read(&iop->read_count)); |
| 130 | WARN_ON_ONCE(atomic_read(&iop->write_count)); |
| 131 | ClearPagePrivate(page); |
| 132 | set_page_private(page, 0); |
Piotr Jaroszynski | 8e47a45 | 2019-01-27 08:46:45 -0800 | [diff] [blame] | 133 | put_page(page); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 134 | kfree(iop); |
| 135 | } |
| 136 | |
| 137 | /* |
| 138 | * Calculate the range inside the page that we actually need to read. |
| 139 | */ |
| 140 | static void |
| 141 | iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, |
| 142 | loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) |
| 143 | { |
Dave Chinner | 8c110d4 | 2018-11-21 08:06:37 -0800 | [diff] [blame] | 144 | loff_t orig_pos = *pos; |
| 145 | loff_t isize = i_size_read(inode); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 146 | unsigned block_bits = inode->i_blkbits; |
| 147 | unsigned block_size = (1 << block_bits); |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 148 | unsigned poff = offset_in_page(*pos); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 149 | unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); |
| 150 | unsigned first = poff >> block_bits; |
| 151 | unsigned last = (poff + plen - 1) >> block_bits; |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 152 | |
| 153 | /* |
| 154 | * If the block size is smaller than the page size we need to check the |
| 155 | * per-block uptodate status and adjust the offset and length if needed |
| 156 | * to avoid reading in already uptodate ranges. |
| 157 | */ |
| 158 | if (iop) { |
| 159 | unsigned int i; |
| 160 | |
| 161 | /* move forward for each leading block marked uptodate */ |
| 162 | for (i = first; i <= last; i++) { |
| 163 | if (!test_bit(i, iop->uptodate)) |
| 164 | break; |
| 165 | *pos += block_size; |
| 166 | poff += block_size; |
| 167 | plen -= block_size; |
| 168 | first++; |
| 169 | } |
| 170 | |
| 171 | /* truncate len if we find any trailing uptodate block(s) */ |
| 172 | for ( ; i <= last; i++) { |
| 173 | if (test_bit(i, iop->uptodate)) { |
| 174 | plen -= (last - i + 1) * block_size; |
| 175 | last = i - 1; |
| 176 | break; |
| 177 | } |
| 178 | } |
| 179 | } |
| 180 | |
| 181 | /* |
 |  182 |  * If the extent spans the block that contains i_size, we need to |
| 183 | * handle both halves separately so that we properly zero data in the |
| 184 | * page cache for blocks that are entirely outside of i_size. |
| 185 | */ |
Dave Chinner | 8c110d4 | 2018-11-21 08:06:37 -0800 | [diff] [blame] | 186 | if (orig_pos <= isize && orig_pos + length > isize) { |
| 187 | unsigned end = offset_in_page(isize - 1) >> block_bits; |
| 188 | |
| 189 | if (first <= end && last > end) |
| 190 | plen -= (last - end) * block_size; |
| 191 | } |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 192 | |
| 193 | *offp = poff; |
| 194 | *lenp = plen; |
| 195 | } |
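/*
 * Worked example (illustrative, not part of the original file): with 1k
 * blocks in a 4k page and a read covering the whole page where only the
 * first block is already uptodate, the leading loop above advances *pos and
 * poff by 1k and shrinks plen to 3k, so only the three remaining blocks are
 * actually read.
 */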
| 196 | |
| 197 | static void |
| 198 | iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len) |
| 199 | { |
| 200 | struct iomap_page *iop = to_iomap_page(page); |
| 201 | struct inode *inode = page->mapping->host; |
| 202 | unsigned first = off >> inode->i_blkbits; |
| 203 | unsigned last = (off + len - 1) >> inode->i_blkbits; |
| 204 | unsigned int i; |
| 205 | bool uptodate = true; |
| 206 | |
| 207 | if (iop) { |
| 208 | for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) { |
| 209 | if (i >= first && i <= last) |
| 210 | set_bit(i, iop->uptodate); |
| 211 | else if (!test_bit(i, iop->uptodate)) |
| 212 | uptodate = false; |
| 213 | } |
| 214 | } |
| 215 | |
| 216 | if (uptodate && !PageError(page)) |
| 217 | SetPageUptodate(page); |
| 218 | } |
| 219 | |
| 220 | static void |
| 221 | iomap_read_finish(struct iomap_page *iop, struct page *page) |
| 222 | { |
| 223 | if (!iop || atomic_dec_and_test(&iop->read_count)) |
| 224 | unlock_page(page); |
| 225 | } |
| 226 | |
| 227 | static void |
| 228 | iomap_read_page_end_io(struct bio_vec *bvec, int error) |
| 229 | { |
| 230 | struct page *page = bvec->bv_page; |
| 231 | struct iomap_page *iop = to_iomap_page(page); |
| 232 | |
| 233 | if (unlikely(error)) { |
| 234 | ClearPageUptodate(page); |
| 235 | SetPageError(page); |
| 236 | } else { |
| 237 | iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len); |
| 238 | } |
| 239 | |
| 240 | iomap_read_finish(iop, page); |
| 241 | } |
| 242 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 243 | static void |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 244 | iomap_read_end_io(struct bio *bio) |
| 245 | { |
| 246 | int error = blk_status_to_errno(bio->bi_status); |
| 247 | struct bio_vec *bvec; |
Ming Lei | 6dc4f10 | 2019-02-15 19:13:19 +0800 | [diff] [blame] | 248 | struct bvec_iter_all iter_all; |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 249 | |
Christoph Hellwig | 2b070cf | 2019-04-25 09:03:00 +0200 | [diff] [blame] | 250 | bio_for_each_segment_all(bvec, bio, iter_all) |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 251 | iomap_read_page_end_io(bvec, error); |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 252 | bio_put(bio); |
| 253 | } |
| 254 | |
| 255 | struct iomap_readpage_ctx { |
| 256 | struct page *cur_page; |
| 257 | bool cur_page_in_bio; |
| 258 | bool is_readahead; |
| 259 | struct bio *bio; |
| 260 | struct list_head *pages; |
| 261 | }; |
| 262 | |
Christoph Hellwig | cbbf4c0 | 2019-05-01 20:16:40 -0700 | [diff] [blame] | 263 | static void |
| 264 | iomap_read_inline_data(struct inode *inode, struct page *page, |
| 265 | struct iomap *iomap) |
| 266 | { |
| 267 | size_t size = i_size_read(inode); |
| 268 | void *addr; |
| 269 | |
| 270 | if (PageUptodate(page)) |
| 271 | return; |
| 272 | |
| 273 | BUG_ON(page->index); |
| 274 | BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); |
| 275 | |
| 276 | addr = kmap_atomic(page); |
| 277 | memcpy(addr, iomap->inline_data, size); |
| 278 | memset(addr + size, 0, PAGE_SIZE - size); |
| 279 | kunmap_atomic(addr); |
| 280 | SetPageUptodate(page); |
| 281 | } |
| 282 | |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 283 | static loff_t |
| 284 | iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, |
| 285 | struct iomap *iomap) |
| 286 | { |
| 287 | struct iomap_readpage_ctx *ctx = data; |
| 288 | struct page *page = ctx->cur_page; |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 289 | struct iomap_page *iop = iomap_page_create(inode, page); |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 290 | bool is_contig = false; |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 291 | loff_t orig_pos = pos; |
| 292 | unsigned poff, plen; |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 293 | sector_t sector; |
| 294 | |
Andreas Gruenbacher | 806a147 | 2018-07-03 09:07:47 -0700 | [diff] [blame] | 295 | if (iomap->type == IOMAP_INLINE) { |
Darrick J. Wong | 7d5e049 | 2018-08-10 17:55:57 -0700 | [diff] [blame] | 296 | WARN_ON_ONCE(pos); |
Andreas Gruenbacher | 806a147 | 2018-07-03 09:07:47 -0700 | [diff] [blame] | 297 | iomap_read_inline_data(inode, page, iomap); |
| 298 | return PAGE_SIZE; |
| 299 | } |
| 300 | |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 301 | /* zero post-eof blocks as the page may be mapped */ |
| 302 | iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); |
| 303 | if (plen == 0) |
| 304 | goto done; |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 305 | |
| 306 | if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) { |
| 307 | zero_user(page, poff, plen); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 308 | iomap_set_range_uptodate(page, poff, plen); |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 309 | goto done; |
| 310 | } |
| 311 | |
| 312 | ctx->cur_page_in_bio = true; |
| 313 | |
| 314 | /* |
| 315 | * Try to merge into a previous segment if we can. |
| 316 | */ |
| 317 | sector = iomap_sector(iomap, pos); |
| 318 | if (ctx->bio && bio_end_sector(ctx->bio) == sector) { |
Ming Lei | 07173c3 | 2019-02-15 19:13:20 +0800 | [diff] [blame] | 319 | if (__bio_try_merge_page(ctx->bio, page, plen, poff, true)) |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 320 | goto done; |
| 321 | is_contig = true; |
| 322 | } |
| 323 | |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 324 | /* |
| 325 | * If we start a new segment we need to increase the read count, and we |
| 326 | * need to do so before submitting any previous full bio to make sure |
| 327 | * that we don't prematurely unlock the page. |
| 328 | */ |
| 329 | if (iop) |
| 330 | atomic_inc(&iop->read_count); |
| 331 | |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 332 | if (!ctx->bio || !is_contig || bio_full(ctx->bio)) { |
| 333 | gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); |
| 334 | int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 335 | |
| 336 | if (ctx->bio) |
| 337 | submit_bio(ctx->bio); |
| 338 | |
| 339 | if (ctx->is_readahead) /* same as readahead_gfp_mask */ |
| 340 | gfp |= __GFP_NORETRY | __GFP_NOWARN; |
| 341 | ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); |
| 342 | ctx->bio->bi_opf = REQ_OP_READ; |
| 343 | if (ctx->is_readahead) |
| 344 | ctx->bio->bi_opf |= REQ_RAHEAD; |
| 345 | ctx->bio->bi_iter.bi_sector = sector; |
| 346 | bio_set_dev(ctx->bio, iomap->bdev); |
| 347 | ctx->bio->bi_end_io = iomap_read_end_io; |
| 348 | } |
| 349 | |
Ming Lei | 07173c3 | 2019-02-15 19:13:20 +0800 | [diff] [blame] | 350 | bio_add_page(ctx->bio, page, plen, poff); |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 351 | done: |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 352 | /* |
| 353 | * Move the caller beyond our range so that it keeps making progress. |
| 354 | * For that we have to include any leading non-uptodate ranges, but |
| 355 | * we can skip trailing ones as they will be handled in the next |
| 356 | * iteration. |
| 357 | */ |
| 358 | return pos - orig_pos + plen; |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 359 | } |
| 360 | |
| 361 | int |
| 362 | iomap_readpage(struct page *page, const struct iomap_ops *ops) |
| 363 | { |
| 364 | struct iomap_readpage_ctx ctx = { .cur_page = page }; |
| 365 | struct inode *inode = page->mapping->host; |
| 366 | unsigned poff; |
| 367 | loff_t ret; |
| 368 | |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 369 | for (poff = 0; poff < PAGE_SIZE; poff += ret) { |
| 370 | ret = iomap_apply(inode, page_offset(page) + poff, |
| 371 | PAGE_SIZE - poff, 0, ops, &ctx, |
| 372 | iomap_readpage_actor); |
| 373 | if (ret <= 0) { |
| 374 | WARN_ON_ONCE(ret == 0); |
| 375 | SetPageError(page); |
| 376 | break; |
| 377 | } |
| 378 | } |
| 379 | |
| 380 | if (ctx.bio) { |
| 381 | submit_bio(ctx.bio); |
| 382 | WARN_ON_ONCE(!ctx.cur_page_in_bio); |
| 383 | } else { |
| 384 | WARN_ON_ONCE(ctx.cur_page_in_bio); |
| 385 | unlock_page(page); |
| 386 | } |
| 387 | |
| 388 | /* |
| 389 | * Just like mpage_readpages and block_read_full_page we always |
| 390 | * return 0 and just mark the page as PageError on errors. This |
| 391 | * should be cleaned up all through the stack eventually. |
| 392 | */ |
| 393 | return 0; |
| 394 | } |
| 395 | EXPORT_SYMBOL_GPL(iomap_readpage); |
| 396 | |
| 397 | static struct page * |
| 398 | iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos, |
| 399 | loff_t length, loff_t *done) |
| 400 | { |
| 401 | while (!list_empty(pages)) { |
| 402 | struct page *page = lru_to_page(pages); |
| 403 | |
| 404 | if (page_offset(page) >= (u64)pos + length) |
| 405 | break; |
| 406 | |
| 407 | list_del(&page->lru); |
| 408 | if (!add_to_page_cache_lru(page, inode->i_mapping, page->index, |
| 409 | GFP_NOFS)) |
| 410 | return page; |
| 411 | |
| 412 | /* |
| 413 | * If we already have a page in the page cache at index we are |
| 414 | * done. Upper layers don't care if it is uptodate after the |
| 415 | * readpages call itself as every page gets checked again once |
| 416 | * actually needed. |
| 417 | */ |
| 418 | *done += PAGE_SIZE; |
| 419 | put_page(page); |
| 420 | } |
| 421 | |
| 422 | return NULL; |
| 423 | } |
| 424 | |
| 425 | static loff_t |
| 426 | iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, |
| 427 | void *data, struct iomap *iomap) |
| 428 | { |
| 429 | struct iomap_readpage_ctx *ctx = data; |
| 430 | loff_t done, ret; |
| 431 | |
| 432 | for (done = 0; done < length; done += ret) { |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 433 | if (ctx->cur_page && offset_in_page(pos + done) == 0) { |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 434 | if (!ctx->cur_page_in_bio) |
| 435 | unlock_page(ctx->cur_page); |
| 436 | put_page(ctx->cur_page); |
| 437 | ctx->cur_page = NULL; |
| 438 | } |
| 439 | if (!ctx->cur_page) { |
| 440 | ctx->cur_page = iomap_next_page(inode, ctx->pages, |
| 441 | pos, length, &done); |
| 442 | if (!ctx->cur_page) |
| 443 | break; |
| 444 | ctx->cur_page_in_bio = false; |
| 445 | } |
| 446 | ret = iomap_readpage_actor(inode, pos + done, length - done, |
| 447 | ctx, iomap); |
| 448 | } |
| 449 | |
| 450 | return done; |
| 451 | } |
| 452 | |
| 453 | int |
| 454 | iomap_readpages(struct address_space *mapping, struct list_head *pages, |
| 455 | unsigned nr_pages, const struct iomap_ops *ops) |
| 456 | { |
| 457 | struct iomap_readpage_ctx ctx = { |
| 458 | .pages = pages, |
| 459 | .is_readahead = true, |
| 460 | }; |
| 461 | loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); |
| 462 | loff_t last = page_offset(list_entry(pages->next, struct page, lru)); |
| 463 | loff_t length = last - pos + PAGE_SIZE, ret = 0; |
| 464 | |
| 465 | while (length > 0) { |
| 466 | ret = iomap_apply(mapping->host, pos, length, 0, ops, |
| 467 | &ctx, iomap_readpages_actor); |
| 468 | if (ret <= 0) { |
| 469 | WARN_ON_ONCE(ret == 0); |
| 470 | goto done; |
| 471 | } |
| 472 | pos += ret; |
| 473 | length -= ret; |
| 474 | } |
| 475 | ret = 0; |
| 476 | done: |
| 477 | if (ctx.bio) |
| 478 | submit_bio(ctx.bio); |
| 479 | if (ctx.cur_page) { |
| 480 | if (!ctx.cur_page_in_bio) |
| 481 | unlock_page(ctx.cur_page); |
| 482 | put_page(ctx.cur_page); |
| 483 | } |
| 484 | |
| 485 | /* |
 |  486 |  * Check that we didn't lose a page due to the arcane calling |
 |  487 |  * conventions. |
| 488 | */ |
| 489 | WARN_ON_ONCE(!ret && !list_empty(ctx.pages)); |
| 490 | return ret; |
| 491 | } |
| 492 | EXPORT_SYMBOL_GPL(iomap_readpages); |
| 493 | |
Eric Sandeen | 3cc31fa | 2018-12-21 08:42:50 -0800 | [diff] [blame] | 494 | /* |
| 495 | * iomap_is_partially_uptodate checks whether blocks within a page are |
| 496 | * uptodate or not. |
| 497 | * |
| 498 | * Returns true if all blocks which correspond to a file portion |
| 499 | * we want to read within the page are uptodate. |
| 500 | */ |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 501 | int |
| 502 | iomap_is_partially_uptodate(struct page *page, unsigned long from, |
| 503 | unsigned long count) |
| 504 | { |
| 505 | struct iomap_page *iop = to_iomap_page(page); |
| 506 | struct inode *inode = page->mapping->host; |
Eric Sandeen | 3cc31fa | 2018-12-21 08:42:50 -0800 | [diff] [blame] | 507 | unsigned len, first, last; |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 508 | unsigned i; |
| 509 | |
Eric Sandeen | 3cc31fa | 2018-12-21 08:42:50 -0800 | [diff] [blame] | 510 | /* Limit range to one page */ |
| 511 | len = min_t(unsigned, PAGE_SIZE - from, count); |
| 512 | |
| 513 | /* First and last blocks in range within page */ |
| 514 | first = from >> inode->i_blkbits; |
| 515 | last = (from + len - 1) >> inode->i_blkbits; |
| 516 | |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 517 | if (iop) { |
| 518 | for (i = first; i <= last; i++) |
| 519 | if (!test_bit(i, iop->uptodate)) |
| 520 | return 0; |
| 521 | return 1; |
| 522 | } |
| 523 | |
| 524 | return 0; |
| 525 | } |
| 526 | EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); |
| 527 | |
| 528 | int |
| 529 | iomap_releasepage(struct page *page, gfp_t gfp_mask) |
| 530 | { |
| 531 | /* |
| 532 | * mm accommodates an old ext3 case where clean pages might not have had |
| 533 | * the dirty bit cleared. Thus, it can send actual dirty pages to |
| 534 | * ->releasepage() via shrink_active_list(), skip those here. |
| 535 | */ |
| 536 | if (PageDirty(page) || PageWriteback(page)) |
| 537 | return 0; |
| 538 | iomap_page_release(page); |
| 539 | return 1; |
| 540 | } |
| 541 | EXPORT_SYMBOL_GPL(iomap_releasepage); |
| 542 | |
| 543 | void |
| 544 | iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len) |
| 545 | { |
| 546 | /* |
| 547 | * If we are invalidating the entire page, clear the dirty state from it |
| 548 | * and release it to avoid unnecessary buildup of the LRU. |
| 549 | */ |
| 550 | if (offset == 0 && len == PAGE_SIZE) { |
| 551 | WARN_ON_ONCE(PageWriteback(page)); |
| 552 | cancel_dirty_page(page); |
| 553 | iomap_page_release(page); |
| 554 | } |
| 555 | } |
| 556 | EXPORT_SYMBOL_GPL(iomap_invalidatepage); |
| 557 | |
| 558 | #ifdef CONFIG_MIGRATION |
| 559 | int |
| 560 | iomap_migrate_page(struct address_space *mapping, struct page *newpage, |
| 561 | struct page *page, enum migrate_mode mode) |
| 562 | { |
| 563 | int ret; |
| 564 | |
Jan Kara | ab41ee6 | 2018-12-28 00:39:20 -0800 | [diff] [blame] | 565 | ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 566 | if (ret != MIGRATEPAGE_SUCCESS) |
| 567 | return ret; |
| 568 | |
| 569 | if (page_has_private(page)) { |
| 570 | ClearPagePrivate(page); |
Piotr Jaroszynski | 8e47a45 | 2019-01-27 08:46:45 -0800 | [diff] [blame] | 571 | get_page(newpage); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 572 | set_page_private(newpage, page_private(page)); |
| 573 | set_page_private(page, 0); |
Piotr Jaroszynski | 8e47a45 | 2019-01-27 08:46:45 -0800 | [diff] [blame] | 574 | put_page(page); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 575 | SetPagePrivate(newpage); |
| 576 | } |
| 577 | |
| 578 | if (mode != MIGRATE_SYNC_NO_COPY) |
| 579 | migrate_page_copy(newpage, page); |
| 580 | else |
| 581 | migrate_page_states(newpage, page); |
| 582 | return MIGRATEPAGE_SUCCESS; |
| 583 | } |
| 584 | EXPORT_SYMBOL_GPL(iomap_migrate_page); |
| 585 | #endif /* CONFIG_MIGRATION */ |
| 586 | |
Christoph Hellwig | 72b4daa | 2018-06-19 15:10:57 -0700 | [diff] [blame] | 587 | static void |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 588 | iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) |
| 589 | { |
| 590 | loff_t i_size = i_size_read(inode); |
| 591 | |
| 592 | /* |
 |  593 |  * Only truncate newly allocated pages beyond EOF, even if the |
| 594 | * write started inside the existing inode size. |
| 595 | */ |
| 596 | if (pos + len > i_size) |
| 597 | truncate_pagecache_range(inode, max(pos, i_size), pos + len); |
| 598 | } |
| 599 | |
| 600 | static int |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 601 | iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page, |
| 602 | unsigned poff, unsigned plen, unsigned from, unsigned to, |
| 603 | struct iomap *iomap) |
| 604 | { |
| 605 | struct bio_vec bvec; |
| 606 | struct bio bio; |
| 607 | |
| 608 | if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) { |
| 609 | zero_user_segments(page, poff, from, to, poff + plen); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 610 | iomap_set_range_uptodate(page, poff, plen); |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 611 | return 0; |
| 612 | } |
| 613 | |
| 614 | bio_init(&bio, &bvec, 1); |
| 615 | bio.bi_opf = REQ_OP_READ; |
| 616 | bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); |
| 617 | bio_set_dev(&bio, iomap->bdev); |
| 618 | __bio_add_page(&bio, page, plen, poff); |
| 619 | return submit_bio_wait(&bio); |
| 620 | } |
| 621 | |
| 622 | static int |
| 623 | __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, |
| 624 | struct page *page, struct iomap *iomap) |
| 625 | { |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 626 | struct iomap_page *iop = iomap_page_create(inode, page); |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 627 | loff_t block_size = i_blocksize(inode); |
| 628 | loff_t block_start = pos & ~(block_size - 1); |
| 629 | loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1); |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 630 | unsigned from = offset_in_page(pos), to = from + len, poff, plen; |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 631 | int status = 0; |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 632 | |
| 633 | if (PageUptodate(page)) |
| 634 | return 0; |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 635 | |
| 636 | do { |
| 637 | iomap_adjust_read_range(inode, iop, &block_start, |
| 638 | block_end - block_start, &poff, &plen); |
| 639 | if (plen == 0) |
| 640 | break; |
| 641 | |
| 642 | if ((from > poff && from < poff + plen) || |
| 643 | (to > poff && to < poff + plen)) { |
| 644 | status = iomap_read_page_sync(inode, block_start, page, |
| 645 | poff, plen, from, to, iomap); |
| 646 | if (status) |
| 647 | break; |
| 648 | } |
| 649 | |
| 650 | } while ((block_start += plen) < block_end); |
| 651 | |
| 652 | return status; |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 653 | } |
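/*
 * Worked example (illustrative, not part of the original file): assuming 1k
 * blocks in a 4k page with no blocks uptodate yet, a 100 byte write at page
 * offset 1000 has from == 1000 and to == 1100, so the loop above reads in
 * the two blocks touched by the write (preserving the head of block 0 and
 * the tail of block 1); blocks outside block_start..block_end are not
 * touched at all.
 */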
| 654 | |
| 655 | static int |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 656 | iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, |
| 657 | struct page **pagep, struct iomap *iomap) |
| 658 | { |
Andreas Gruenbacher | df0db3e | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 659 | const struct iomap_page_ops *page_ops = iomap->page_ops; |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 660 | pgoff_t index = pos >> PAGE_SHIFT; |
| 661 | struct page *page; |
| 662 | int status = 0; |
| 663 | |
| 664 | BUG_ON(pos + len > iomap->offset + iomap->length); |
| 665 | |
Michal Hocko | d1908f5 | 2017-02-03 13:13:26 -0800 | [diff] [blame] | 666 | if (fatal_signal_pending(current)) |
| 667 | return -EINTR; |
| 668 | |
Andreas Gruenbacher | df0db3e | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 669 | if (page_ops && page_ops->page_prepare) { |
| 670 | status = page_ops->page_prepare(inode, pos, len, iomap); |
| 671 | if (status) |
| 672 | return status; |
| 673 | } |
| 674 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 675 | page = grab_cache_page_write_begin(inode->i_mapping, index, flags); |
Andreas Gruenbacher | df0db3e | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 676 | if (!page) { |
| 677 | status = -ENOMEM; |
| 678 | goto out_no_page; |
| 679 | } |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 680 | |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 681 | if (iomap->type == IOMAP_INLINE) |
| 682 | iomap_read_inline_data(inode, page, iomap); |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 683 | else if (iomap->flags & IOMAP_F_BUFFER_HEAD) |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 684 | status = __block_write_begin_int(page, pos, len, NULL, iomap); |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 685 | else |
| 686 | status = __iomap_write_begin(inode, pos, len, page, iomap); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 687 | |
Andreas Gruenbacher | df0db3e | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 688 | if (unlikely(status)) |
| 689 | goto out_unlock; |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 690 | |
| 691 | *pagep = page; |
Andreas Gruenbacher | df0db3e | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 692 | return 0; |
| 693 | |
| 694 | out_unlock: |
| 695 | unlock_page(page); |
| 696 | put_page(page); |
| 697 | iomap_write_failed(inode, pos, len); |
| 698 | |
| 699 | out_no_page: |
| 700 | if (page_ops && page_ops->page_done) |
| 701 | page_ops->page_done(inode, pos, 0, NULL, iomap); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 702 | return status; |
| 703 | } |
| 704 | |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 705 | int |
| 706 | iomap_set_page_dirty(struct page *page) |
| 707 | { |
| 708 | struct address_space *mapping = page_mapping(page); |
| 709 | int newly_dirty; |
| 710 | |
| 711 | if (unlikely(!mapping)) |
| 712 | return !TestSetPageDirty(page); |
| 713 | |
| 714 | /* |
| 715 | * Lock out page->mem_cgroup migration to keep PageDirty |
| 716 | * synchronized with per-memcg dirty page counters. |
| 717 | */ |
| 718 | lock_page_memcg(page); |
| 719 | newly_dirty = !TestSetPageDirty(page); |
| 720 | if (newly_dirty) |
| 721 | __set_page_dirty(page, mapping, 0); |
| 722 | unlock_page_memcg(page); |
| 723 | |
| 724 | if (newly_dirty) |
| 725 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); |
| 726 | return newly_dirty; |
| 727 | } |
| 728 | EXPORT_SYMBOL_GPL(iomap_set_page_dirty); |
| 729 | |
| 730 | static int |
| 731 | __iomap_write_end(struct inode *inode, loff_t pos, unsigned len, |
| 732 | unsigned copied, struct page *page, struct iomap *iomap) |
| 733 | { |
| 734 | flush_dcache_page(page); |
| 735 | |
| 736 | /* |
| 737 | * The blocks that were entirely written will now be uptodate, so we |
| 738 | * don't have to worry about a readpage reading them and overwriting a |
| 739 | * partial write. However if we have encountered a short write and only |
| 740 | * partially written into a block, it will not be marked uptodate, so a |
| 741 | * readpage might come in and destroy our partial write. |
| 742 | * |
| 743 | * Do the simplest thing, and just treat any short write to a non |
| 744 | * uptodate page as a zero-length write, and force the caller to redo |
| 745 | * the whole thing. |
| 746 | */ |
Christoph Hellwig | dbc582b | 2019-04-30 08:45:33 -0700 | [diff] [blame] | 747 | if (unlikely(copied < len && !PageUptodate(page))) |
| 748 | return 0; |
| 749 | iomap_set_range_uptodate(page, offset_in_page(pos), len); |
| 750 | iomap_set_page_dirty(page); |
| 751 | return copied; |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 752 | } |
| 753 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 754 | static int |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 755 | iomap_write_end_inline(struct inode *inode, struct page *page, |
| 756 | struct iomap *iomap, loff_t pos, unsigned copied) |
| 757 | { |
| 758 | void *addr; |
| 759 | |
| 760 | WARN_ON_ONCE(!PageUptodate(page)); |
| 761 | BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data)); |
| 762 | |
| 763 | addr = kmap_atomic(page); |
| 764 | memcpy(iomap->inline_data + pos, addr + pos, copied); |
| 765 | kunmap_atomic(addr); |
| 766 | |
| 767 | mark_inode_dirty(inode); |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 768 | return copied; |
| 769 | } |
| 770 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 771 | static int |
| 772 | iomap_write_end(struct inode *inode, loff_t pos, unsigned len, |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 773 | unsigned copied, struct page *page, struct iomap *iomap) |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 774 | { |
Andreas Gruenbacher | df0db3e | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 775 | const struct iomap_page_ops *page_ops = iomap->page_ops; |
Andreas Gruenbacher | 8d3e72a | 2019-06-27 17:28:40 -0700 | [diff] [blame] | 776 | loff_t old_size = inode->i_size; |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 777 | int ret; |
| 778 | |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 779 | if (iomap->type == IOMAP_INLINE) { |
| 780 | ret = iomap_write_end_inline(inode, page, iomap, pos, copied); |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 781 | } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) { |
Christoph Hellwig | dbc582b | 2019-04-30 08:45:33 -0700 | [diff] [blame] | 782 | ret = block_write_end(NULL, inode->i_mapping, pos, len, copied, |
| 783 | page, NULL); |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 784 | } else { |
| 785 | ret = __iomap_write_end(inode, pos, len, copied, page, iomap); |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 786 | } |
| 787 | |
Andreas Gruenbacher | 8d3e72a | 2019-06-27 17:28:40 -0700 | [diff] [blame] | 788 | /* |
| 789 | * Update the in-memory inode size after copying the data into the page |
| 790 | * cache. It's up to the file system to write the updated size to disk, |
| 791 | * preferably after I/O completion so that no stale data is exposed. |
| 792 | */ |
| 793 | if (pos + ret > old_size) { |
| 794 | i_size_write(inode, pos + ret); |
| 795 | iomap->flags |= IOMAP_F_SIZE_CHANGED; |
| 796 | } |
| 797 | unlock_page(page); |
| 798 | |
| 799 | if (old_size < pos) |
| 800 | pagecache_isize_extended(inode, old_size, pos); |
Andreas Gruenbacher | df0db3e | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 801 | if (page_ops && page_ops->page_done) |
Andreas Gruenbacher | 36a7347d | 2019-06-27 17:28:41 -0700 | [diff] [blame^] | 802 | page_ops->page_done(inode, pos, ret, page, iomap); |
Andreas Gruenbacher | 7a77dad | 2019-04-30 08:45:34 -0700 | [diff] [blame] | 803 | put_page(page); |
Christoph Hellwig | 63899c6 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 804 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 805 | if (ret < len) |
| 806 | iomap_write_failed(inode, pos, len); |
| 807 | return ret; |
| 808 | } |
| 809 | |
| 810 | static loff_t |
| 811 | iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, |
| 812 | struct iomap *iomap) |
| 813 | { |
| 814 | struct iov_iter *i = data; |
| 815 | long status = 0; |
| 816 | ssize_t written = 0; |
| 817 | unsigned int flags = AOP_FLAG_NOFS; |
| 818 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 819 | do { |
| 820 | struct page *page; |
| 821 | unsigned long offset; /* Offset into pagecache page */ |
| 822 | unsigned long bytes; /* Bytes to write to page */ |
| 823 | size_t copied; /* Bytes copied from user */ |
| 824 | |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 825 | offset = offset_in_page(pos); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 826 | bytes = min_t(unsigned long, PAGE_SIZE - offset, |
| 827 | iov_iter_count(i)); |
| 828 | again: |
| 829 | if (bytes > length) |
| 830 | bytes = length; |
| 831 | |
| 832 | /* |
| 833 | * Bring in the user page that we will copy from _first_. |
| 834 | * Otherwise there's a nasty deadlock on copying from the |
| 835 | * same page as we're writing to, without it being marked |
| 836 | * up-to-date. |
| 837 | * |
| 838 | * Not only is this an optimisation, but it is also required |
| 839 | * to check that the address is actually valid, when atomic |
| 840 | * usercopies are used, below. |
| 841 | */ |
| 842 | if (unlikely(iov_iter_fault_in_readable(i, bytes))) { |
| 843 | status = -EFAULT; |
| 844 | break; |
| 845 | } |
| 846 | |
| 847 | status = iomap_write_begin(inode, pos, bytes, flags, &page, |
| 848 | iomap); |
| 849 | if (unlikely(status)) |
| 850 | break; |
| 851 | |
| 852 | if (mapping_writably_mapped(inode->i_mapping)) |
| 853 | flush_dcache_page(page); |
| 854 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 855 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 856 | |
| 857 | flush_dcache_page(page); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 858 | |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 859 | status = iomap_write_end(inode, pos, bytes, copied, page, |
| 860 | iomap); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 861 | if (unlikely(status < 0)) |
| 862 | break; |
| 863 | copied = status; |
| 864 | |
| 865 | cond_resched(); |
| 866 | |
| 867 | iov_iter_advance(i, copied); |
| 868 | if (unlikely(copied == 0)) { |
| 869 | /* |
| 870 | * If we were unable to copy any data at all, we must |
| 871 | * fall back to a single segment length write. |
| 872 | * |
 |  873 |  * If we didn't fall back here, we could livelock |
| 874 | * because not all segments in the iov can be copied at |
| 875 | * once without a pagefault. |
| 876 | */ |
| 877 | bytes = min_t(unsigned long, PAGE_SIZE - offset, |
| 878 | iov_iter_single_seg_count(i)); |
| 879 | goto again; |
| 880 | } |
| 881 | pos += copied; |
| 882 | written += copied; |
| 883 | length -= copied; |
| 884 | |
| 885 | balance_dirty_pages_ratelimited(inode->i_mapping); |
| 886 | } while (iov_iter_count(i) && length); |
| 887 | |
| 888 | return written ? written : status; |
| 889 | } |
| 890 | |
| 891 | ssize_t |
| 892 | iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter, |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 893 | const struct iomap_ops *ops) |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 894 | { |
| 895 | struct inode *inode = iocb->ki_filp->f_mapping->host; |
| 896 | loff_t pos = iocb->ki_pos, ret = 0, written = 0; |
| 897 | |
| 898 | while (iov_iter_count(iter)) { |
| 899 | ret = iomap_apply(inode, pos, iov_iter_count(iter), |
| 900 | IOMAP_WRITE, ops, iter, iomap_write_actor); |
| 901 | if (ret <= 0) |
| 902 | break; |
| 903 | pos += ret; |
| 904 | written += ret; |
| 905 | } |
| 906 | |
| 907 | return written ? written : ret; |
| 908 | } |
| 909 | EXPORT_SYMBOL_GPL(iomap_file_buffered_write); |
| 910 | |
Christoph Hellwig | 5f4e575 | 2016-09-19 10:12:45 +1000 | [diff] [blame] | 911 | static struct page * |
| 912 | __iomap_read_page(struct inode *inode, loff_t offset) |
| 913 | { |
| 914 | struct address_space *mapping = inode->i_mapping; |
| 915 | struct page *page; |
| 916 | |
| 917 | page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL); |
| 918 | if (IS_ERR(page)) |
| 919 | return page; |
| 920 | if (!PageUptodate(page)) { |
| 921 | put_page(page); |
| 922 | return ERR_PTR(-EIO); |
| 923 | } |
| 924 | return page; |
| 925 | } |
| 926 | |
| 927 | static loff_t |
| 928 | iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data, |
| 929 | struct iomap *iomap) |
| 930 | { |
| 931 | long status = 0; |
| 932 | ssize_t written = 0; |
| 933 | |
| 934 | do { |
| 935 | struct page *page, *rpage; |
| 936 | unsigned long offset; /* Offset into pagecache page */ |
| 937 | unsigned long bytes; /* Bytes to write to page */ |
| 938 | |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 939 | offset = offset_in_page(pos); |
Christoph Hellwig | e28ae8e | 2017-08-11 12:45:35 -0700 | [diff] [blame] | 940 | bytes = min_t(loff_t, PAGE_SIZE - offset, length); |
Christoph Hellwig | 5f4e575 | 2016-09-19 10:12:45 +1000 | [diff] [blame] | 941 | |
| 942 | rpage = __iomap_read_page(inode, pos); |
| 943 | if (IS_ERR(rpage)) |
| 944 | return PTR_ERR(rpage); |
| 945 | |
| 946 | status = iomap_write_begin(inode, pos, bytes, |
Tetsuo Handa | c718a97 | 2017-05-08 15:58:59 -0700 | [diff] [blame] | 947 | AOP_FLAG_NOFS, &page, iomap); |
Christoph Hellwig | 5f4e575 | 2016-09-19 10:12:45 +1000 | [diff] [blame] | 948 | put_page(rpage); |
| 949 | if (unlikely(status)) |
| 950 | return status; |
| 951 | |
| 952 | WARN_ON_ONCE(!PageUptodate(page)); |
| 953 | |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 954 | status = iomap_write_end(inode, pos, bytes, bytes, page, iomap); |
Christoph Hellwig | 5f4e575 | 2016-09-19 10:12:45 +1000 | [diff] [blame] | 955 | if (unlikely(status <= 0)) { |
| 956 | if (WARN_ON_ONCE(status == 0)) |
| 957 | return -EIO; |
| 958 | return status; |
| 959 | } |
| 960 | |
| 961 | cond_resched(); |
| 962 | |
| 963 | pos += status; |
| 964 | written += status; |
| 965 | length -= status; |
| 966 | |
| 967 | balance_dirty_pages_ratelimited(inode->i_mapping); |
| 968 | } while (length); |
| 969 | |
| 970 | return written; |
| 971 | } |
| 972 | |
| 973 | int |
| 974 | iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len, |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 975 | const struct iomap_ops *ops) |
Christoph Hellwig | 5f4e575 | 2016-09-19 10:12:45 +1000 | [diff] [blame] | 976 | { |
| 977 | loff_t ret; |
| 978 | |
| 979 | while (len) { |
| 980 | ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL, |
| 981 | iomap_dirty_actor); |
| 982 | if (ret <= 0) |
| 983 | return ret; |
| 984 | pos += ret; |
| 985 | len -= ret; |
| 986 | } |
| 987 | |
| 988 | return 0; |
| 989 | } |
| 990 | EXPORT_SYMBOL_GPL(iomap_file_dirty); |
| 991 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 992 | static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, |
| 993 | unsigned bytes, struct iomap *iomap) |
| 994 | { |
| 995 | struct page *page; |
| 996 | int status; |
| 997 | |
Tetsuo Handa | c718a97 | 2017-05-08 15:58:59 -0700 | [diff] [blame] | 998 | status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page, |
| 999 | iomap); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1000 | if (status) |
| 1001 | return status; |
| 1002 | |
| 1003 | zero_user(page, offset, bytes); |
| 1004 | mark_page_accessed(page); |
| 1005 | |
Andreas Gruenbacher | 19e0c58 | 2018-06-19 15:10:56 -0700 | [diff] [blame] | 1006 | return iomap_write_end(inode, pos, bytes, bytes, page, iomap); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1007 | } |
| 1008 | |
Christoph Hellwig | 9a286f0 | 2016-06-21 09:31:39 +1000 | [diff] [blame] | 1009 | static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, |
| 1010 | struct iomap *iomap) |
| 1011 | { |
Christoph Hellwig | 57fc505 | 2018-06-01 09:03:08 -0700 | [diff] [blame] | 1012 | return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, |
| 1013 | iomap_sector(iomap, pos & PAGE_MASK), offset, bytes); |
Christoph Hellwig | 9a286f0 | 2016-06-21 09:31:39 +1000 | [diff] [blame] | 1014 | } |
| 1015 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1016 | static loff_t |
| 1017 | iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count, |
| 1018 | void *data, struct iomap *iomap) |
| 1019 | { |
| 1020 | bool *did_zero = data; |
| 1021 | loff_t written = 0; |
| 1022 | int status; |
| 1023 | |
| 1024 | /* already zeroed? we're done. */ |
| 1025 | if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) |
| 1026 | return count; |
| 1027 | |
| 1028 | do { |
| 1029 | unsigned offset, bytes; |
| 1030 | |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 1031 | offset = offset_in_page(pos); |
Christoph Hellwig | e28ae8e | 2017-08-11 12:45:35 -0700 | [diff] [blame] | 1032 | bytes = min_t(loff_t, PAGE_SIZE - offset, count); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1033 | |
Christoph Hellwig | 9a286f0 | 2016-06-21 09:31:39 +1000 | [diff] [blame] | 1034 | if (IS_DAX(inode)) |
| 1035 | status = iomap_dax_zero(pos, offset, bytes, iomap); |
| 1036 | else |
| 1037 | status = iomap_zero(inode, pos, offset, bytes, iomap); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1038 | if (status < 0) |
| 1039 | return status; |
| 1040 | |
| 1041 | pos += bytes; |
| 1042 | count -= bytes; |
| 1043 | written += bytes; |
| 1044 | if (did_zero) |
| 1045 | *did_zero = true; |
| 1046 | } while (count > 0); |
| 1047 | |
| 1048 | return written; |
| 1049 | } |
| 1050 | |
| 1051 | int |
| 1052 | iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 1053 | const struct iomap_ops *ops) |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1054 | { |
| 1055 | loff_t ret; |
| 1056 | |
| 1057 | while (len > 0) { |
| 1058 | ret = iomap_apply(inode, pos, len, IOMAP_ZERO, |
| 1059 | ops, did_zero, iomap_zero_range_actor); |
| 1060 | if (ret <= 0) |
| 1061 | return ret; |
| 1062 | |
| 1063 | pos += ret; |
| 1064 | len -= ret; |
| 1065 | } |
| 1066 | |
| 1067 | return 0; |
| 1068 | } |
| 1069 | EXPORT_SYMBOL_GPL(iomap_zero_range); |
| 1070 | |
| 1071 | int |
| 1072 | iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 1073 | const struct iomap_ops *ops) |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1074 | { |
Fabian Frederick | 9340747 | 2017-02-27 14:28:32 -0800 | [diff] [blame] | 1075 | unsigned int blocksize = i_blocksize(inode); |
| 1076 | unsigned int off = pos & (blocksize - 1); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1077 | |
| 1078 | /* Block boundary? Nothing to do */ |
| 1079 | if (!off) |
| 1080 | return 0; |
| 1081 | return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); |
| 1082 | } |
| 1083 | EXPORT_SYMBOL_GPL(iomap_truncate_page); |
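/*
 * Illustrative example (not part of the original file): with 1k blocks, a
 * truncate to file offset 1536 has off == 512, so the remaining 512 bytes
 * of the partial block (offsets 1536..2047) are zeroed; a truncate to a
 * block-aligned offset returns immediately.
 */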
| 1084 | |
| 1085 | static loff_t |
| 1086 | iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, |
| 1087 | void *data, struct iomap *iomap) |
| 1088 | { |
| 1089 | struct page *page = data; |
| 1090 | int ret; |
| 1091 | |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 1092 | if (iomap->flags & IOMAP_F_BUFFER_HEAD) { |
| 1093 | ret = __block_write_begin_int(page, pos, length, NULL, iomap); |
| 1094 | if (ret) |
| 1095 | return ret; |
| 1096 | block_commit_write(page, 0, length); |
| 1097 | } else { |
| 1098 | WARN_ON_ONCE(!PageUptodate(page)); |
Christoph Hellwig | 9dc55f1 | 2018-07-11 22:26:05 -0700 | [diff] [blame] | 1099 | iomap_page_create(inode, page); |
Brian Foster | 561295a | 2018-09-29 13:51:01 +1000 | [diff] [blame] | 1100 | set_page_dirty(page); |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 1101 | } |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1102 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1103 | return length; |
| 1104 | } |
| 1105 | |
Souptick Joarder | 5780a02 | 2018-10-26 15:02:59 -0700 | [diff] [blame] | 1106 | vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1107 | { |
| 1108 | struct page *page = vmf->page; |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 1109 | struct inode *inode = file_inode(vmf->vma->vm_file); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1110 | unsigned long length; |
| 1111 | loff_t offset, size; |
| 1112 | ssize_t ret; |
| 1113 | |
| 1114 | lock_page(page); |
| 1115 | size = i_size_read(inode); |
| 1116 | if ((page->mapping != inode->i_mapping) || |
| 1117 | (page_offset(page) > size)) { |
| 1118 | /* We overload EFAULT to mean page got truncated */ |
| 1119 | ret = -EFAULT; |
| 1120 | goto out_unlock; |
| 1121 | } |
| 1122 | |
| 1123 | /* page is wholly or partially inside EOF */ |
| 1124 | if (((page->index + 1) << PAGE_SHIFT) > size) |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 1125 | length = offset_in_page(size); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1126 | else |
| 1127 | length = PAGE_SIZE; |
| 1128 | |
| 1129 | offset = page_offset(page); |
| 1130 | while (length > 0) { |
Jan Kara | 9484ab1 | 2016-11-10 10:26:50 +1100 | [diff] [blame] | 1131 | ret = iomap_apply(inode, offset, length, |
| 1132 | IOMAP_WRITE | IOMAP_FAULT, ops, page, |
| 1133 | iomap_page_mkwrite_actor); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1134 | if (unlikely(ret <= 0)) |
| 1135 | goto out_unlock; |
| 1136 | offset += ret; |
| 1137 | length -= ret; |
| 1138 | } |
| 1139 | |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1140 | wait_for_stable_page(page); |
Christoph Hellwig | e7647fb | 2017-08-29 10:08:41 -0700 | [diff] [blame] | 1141 | return VM_FAULT_LOCKED; |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1142 | out_unlock: |
| 1143 | unlock_page(page); |
Christoph Hellwig | e7647fb | 2017-08-29 10:08:41 -0700 | [diff] [blame] | 1144 | return block_page_mkwrite_return(ret); |
Christoph Hellwig | ae259a9 | 2016-06-21 09:23:11 +1000 | [diff] [blame] | 1145 | } |
| 1146 | EXPORT_SYMBOL_GPL(iomap_page_mkwrite); |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1147 | |
| 1148 | struct fiemap_ctx { |
| 1149 | struct fiemap_extent_info *fi; |
| 1150 | struct iomap prev; |
| 1151 | }; |
| 1152 | |
| 1153 | static int iomap_to_fiemap(struct fiemap_extent_info *fi, |
| 1154 | struct iomap *iomap, u32 flags) |
| 1155 | { |
| 1156 | switch (iomap->type) { |
| 1157 | case IOMAP_HOLE: |
| 1158 | /* skip holes */ |
| 1159 | return 0; |
| 1160 | case IOMAP_DELALLOC: |
| 1161 | flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN; |
| 1162 | break; |
Christoph Hellwig | 19319b5 | 2018-06-01 09:03:06 -0700 | [diff] [blame] | 1163 | case IOMAP_MAPPED: |
| 1164 | break; |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1165 | case IOMAP_UNWRITTEN: |
| 1166 | flags |= FIEMAP_EXTENT_UNWRITTEN; |
| 1167 | break; |
Christoph Hellwig | 19319b5 | 2018-06-01 09:03:06 -0700 | [diff] [blame] | 1168 | case IOMAP_INLINE: |
| 1169 | flags |= FIEMAP_EXTENT_DATA_INLINE; |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1170 | break; |
| 1171 | } |
| 1172 | |
Christoph Hellwig | 17de0a9 | 2016-08-29 11:33:58 +1000 | [diff] [blame] | 1173 | if (iomap->flags & IOMAP_F_MERGED) |
| 1174 | flags |= FIEMAP_EXTENT_MERGED; |
Darrick J. Wong | e43c460 | 2016-09-19 10:13:02 +1000 | [diff] [blame] | 1175 | if (iomap->flags & IOMAP_F_SHARED) |
| 1176 | flags |= FIEMAP_EXTENT_SHARED; |
Christoph Hellwig | 17de0a9 | 2016-08-29 11:33:58 +1000 | [diff] [blame] | 1177 | |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1178 | return fiemap_fill_next_extent(fi, iomap->offset, |
Andreas Gruenbacher | 19fe5f6 | 2017-10-01 17:55:54 -0400 | [diff] [blame] | 1179 | iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0, |
Christoph Hellwig | 17de0a9 | 2016-08-29 11:33:58 +1000 | [diff] [blame] | 1180 | iomap->length, flags); |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1181 | } |
| 1182 | |
| 1183 | static loff_t |
| 1184 | iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, |
| 1185 | struct iomap *iomap) |
| 1186 | { |
| 1187 | struct fiemap_ctx *ctx = data; |
| 1188 | loff_t ret = length; |
| 1189 | |
| 1190 | if (iomap->type == IOMAP_HOLE) |
| 1191 | return length; |
| 1192 | |
| 1193 | ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0); |
| 1194 | ctx->prev = *iomap; |
| 1195 | switch (ret) { |
| 1196 | case 0: /* success */ |
| 1197 | return length; |
| 1198 | case 1: /* extent array full */ |
| 1199 | return 0; |
| 1200 | default: |
| 1201 | return ret; |
| 1202 | } |
| 1203 | } |
| 1204 | |
| 1205 | int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 1206 | loff_t start, loff_t len, const struct iomap_ops *ops) |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1207 | { |
| 1208 | struct fiemap_ctx ctx; |
| 1209 | loff_t ret; |
| 1210 | |
| 1211 | memset(&ctx, 0, sizeof(ctx)); |
| 1212 | ctx.fi = fi; |
| 1213 | ctx.prev.type = IOMAP_HOLE; |
| 1214 | |
| 1215 | ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC); |
| 1216 | if (ret) |
| 1217 | return ret; |
| 1218 | |
Dave Chinner | 8896b8f | 2016-08-17 08:41:10 +1000 | [diff] [blame] | 1219 | if (fi->fi_flags & FIEMAP_FLAG_SYNC) { |
| 1220 | ret = filemap_write_and_wait(inode->i_mapping); |
| 1221 | if (ret) |
| 1222 | return ret; |
| 1223 | } |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1224 | |
| 1225 | while (len > 0) { |
Christoph Hellwig | d33fd77 | 2016-10-20 15:51:28 +1100 | [diff] [blame] | 1226 | ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx, |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1227 | iomap_fiemap_actor); |
Dave Chinner | ac2dc05 | 2016-08-17 08:41:34 +1000 | [diff] [blame] | 1228 | /* inode with no (attribute) mapping will give ENOENT */ |
| 1229 | if (ret == -ENOENT) |
| 1230 | break; |
Christoph Hellwig | 8be9f56 | 2016-06-21 09:38:45 +1000 | [diff] [blame] | 1231 | if (ret < 0) |
| 1232 | return ret; |
| 1233 | if (ret == 0) |
| 1234 | break; |
| 1235 | |
| 1236 | start += ret; |
| 1237 | len -= ret; |
| 1238 | } |
| 1239 | |
| 1240 | if (ctx.prev.type != IOMAP_HOLE) { |
| 1241 | ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST); |
| 1242 | if (ret < 0) |
| 1243 | return ret; |
| 1244 | } |
| 1245 | |
| 1246 | return 0; |
| 1247 | } |
| 1248 | EXPORT_SYMBOL_GPL(iomap_fiemap); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1249 | |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1250 | /* |
| 1251 | * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff. |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1252 | * Returns true if found and updates @lastoff to the offset in the file. |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1253 | */ |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1254 | static bool |
| 1255 | page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff, |
| 1256 | int whence) |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1257 | { |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1258 | const struct address_space_operations *ops = inode->i_mapping->a_ops; |
| 1259 | unsigned int bsize = i_blocksize(inode), off; |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1260 | bool seek_data = whence == SEEK_DATA; |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1261 | loff_t poff = page_offset(page); |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1262 | |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1263 | if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE)) |
| 1264 | return false; |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1265 | |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1266 | if (*lastoff < poff) { |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1267 | /* |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1268 | * Last offset smaller than the start of the page means we found |
| 1269 | * a hole: |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1270 | */ |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1271 | if (whence == SEEK_HOLE) |
| 1272 | return true; |
| 1273 | *lastoff = poff; |
| 1274 | } |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1275 | |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1276 | /* |
| 1277 | * Just check the page unless we can and should check block ranges: |
| 1278 | */ |
| 1279 | if (bsize == PAGE_SIZE || !ops->is_partially_uptodate) |
| 1280 | return PageUptodate(page) == seek_data; |
| 1281 | |
| 1282 | lock_page(page); |
| 1283 | if (unlikely(page->mapping != inode->i_mapping)) |
| 1284 | goto out_unlock_not_found; |
| 1285 | |
| 1286 | for (off = 0; off < PAGE_SIZE; off += bsize) { |
Andreas Gruenbacher | 10259de | 2018-08-10 11:46:14 -0700 | [diff] [blame] | 1287 | if (offset_in_page(*lastoff) >= off + bsize) |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1288 | continue; |
| 1289 | if (ops->is_partially_uptodate(page, off, bsize) == seek_data) { |
| 1290 | unlock_page(page); |
| 1291 | return true; |
| 1292 | } |
| 1293 | *lastoff = poff + off + bsize; |
| 1294 | } |
| 1295 | |
| 1296 | out_unlock_not_found: |
| 1297 | unlock_page(page); |
| 1298 | return false; |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1299 | } |
| 1300 | |
| 1301 | /* |
| 1302 | * Seek for SEEK_DATA / SEEK_HOLE in the page cache. |
| 1303 | * |
| 1304 | * Within unwritten extents, the page cache determines which parts are holes |
Christoph Hellwig | bd56b3e | 2018-06-01 09:05:14 -0700 | [diff] [blame] | 1305 | * and which are data: uptodate page ranges count as data; everything else |
| 1306 | * counts as a hole. |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1307 | * |
| 1308 | * Returns the resulting offset on success, and -ENOENT otherwise. |
| 1309 | */ |
| 1310 | static loff_t |
| 1311 | page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length, |
| 1312 | int whence) |
| 1313 | { |
| 1314 | pgoff_t index = offset >> PAGE_SHIFT; |
| 1315 | pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE); |
| 1316 | loff_t lastoff = offset; |
| 1317 | struct pagevec pvec; |
| 1318 | |
| 1319 | if (length <= 0) |
| 1320 | return -ENOENT; |
| 1321 | |
| 1322 | pagevec_init(&pvec); |
| 1323 | |
| 1324 | do { |
| 1325 | unsigned nr_pages, i; |
| 1326 | |
| 1327 | nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index, |
| 1328 | end - 1); |
| 1329 | if (nr_pages == 0) |
| 1330 | break; |
| 1331 | |
| 1332 | for (i = 0; i < nr_pages; i++) { |
| 1333 | struct page *page = pvec.pages[i]; |
| 1334 | |
Christoph Hellwig | afd9d6a | 2018-06-01 09:05:15 -0700 | [diff] [blame] | 1335 | if (page_seek_hole_data(inode, page, &lastoff, whence)) |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1336 | goto check_range; |
Christoph Hellwig | 8a78cb1 | 2018-06-01 09:04:40 -0700 | [diff] [blame] | 1337 | lastoff = page_offset(page) + PAGE_SIZE; |
| 1338 | } |
| 1339 | pagevec_release(&pvec); |
| 1340 | } while (index < end); |
| 1341 | |
| 1342 | /* When there is no page at lastoff and we are not done, we found a hole. */ |
| 1343 | if (whence != SEEK_HOLE) |
| 1344 | goto not_found; |
| 1345 | |
| 1346 | check_range: |
| 1347 | if (lastoff < offset + length) |
| 1348 | goto out; |
| 1349 | not_found: |
| 1350 | lastoff = -ENOENT; |
| 1351 | out: |
| 1352 | pagevec_release(&pvec); |
| 1353 | return lastoff; |
| 1354 | } |
| 1355 | |
| 1356 | |
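|  | /* |
|  | * Seek actors follow the usual iomap_apply() convention: returning the |
|  | * remaining length continues the walk, returning 0 stops it, and the found |
|  | * offset is handed back through the opaque data pointer.  Unwritten extents |
|  | * defer to the page cache, since cached data over them must be treated as |
|  | * data rather than as a hole. |
|  | */ |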
Andreas Gruenbacher | 0ed3b0d | 2017-06-29 11:43:21 -0700 | [diff] [blame] | 1357 | static loff_t |
| 1358 | iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length, |
| 1359 | void *data, struct iomap *iomap) |
| 1360 | { |
| 1361 | switch (iomap->type) { |
| 1362 | case IOMAP_UNWRITTEN: |
| 1363 | offset = page_cache_seek_hole_data(inode, offset, length, |
| 1364 | SEEK_HOLE); |
| 1365 | if (offset < 0) |
| 1366 | return length; |
| 1367 | /* fall through */ |
| 1368 | case IOMAP_HOLE: |
| 1369 | *(loff_t *)data = offset; |
| 1370 | return 0; |
| 1371 | default: |
| 1372 | return length; |
| 1373 | } |
| 1374 | } |
| 1375 | |
| 1376 | loff_t |
| 1377 | iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops) |
| 1378 | { |
| 1379 | loff_t size = i_size_read(inode); |
| 1380 | loff_t length = size - offset; |
| 1381 | loff_t ret; |
| 1382 | |
Darrick J. Wong | d6ab17f | 2017-07-12 10:26:47 -0700 | [diff] [blame] | 1383 | /* Nothing to be found before or beyond the end of the file. */ |
| 1384 | if (offset < 0 || offset >= size) |
Andreas Gruenbacher | 0ed3b0d | 2017-06-29 11:43:21 -0700 | [diff] [blame] | 1385 | return -ENXIO; |
| 1386 | |
| 1387 | while (length > 0) { |
| 1388 | ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, |
| 1389 | &offset, iomap_seek_hole_actor); |
| 1390 | if (ret < 0) |
| 1391 | return ret; |
| 1392 | if (ret == 0) |
| 1393 | break; |
| 1394 | |
| 1395 | offset += ret; |
| 1396 | length -= ret; |
| 1397 | } |
| 1398 | |
| 1399 | return offset; |
| 1400 | } |
| 1401 | EXPORT_SYMBOL_GPL(iomap_seek_hole); |
| 1402 | |
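|  | /* |
|  | * Mirror image of the hole actor: holes are skipped, unwritten extents only |
|  | * count as data where the page cache holds uptodate data over them, and any |
|  | * other extent type is data right away. |
|  | */ |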
| 1403 | static loff_t |
| 1404 | iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length, |
| 1405 | void *data, struct iomap *iomap) |
| 1406 | { |
| 1407 | switch (iomap->type) { |
| 1408 | case IOMAP_HOLE: |
| 1409 | return length; |
| 1410 | case IOMAP_UNWRITTEN: |
| 1411 | offset = page_cache_seek_hole_data(inode, offset, length, |
| 1412 | SEEK_DATA); |
| 1413 | if (offset < 0) |
| 1414 | return length; |
| 1415 | /* fall through */ |
| 1416 | default: |
| 1417 | *(loff_t *)data = offset; |
| 1418 | return 0; |
| 1419 | } |
| 1420 | } |
| 1421 | |
| 1422 | loff_t |
| 1423 | iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops) |
| 1424 | { |
| 1425 | loff_t size = i_size_read(inode); |
| 1426 | loff_t length = size - offset; |
| 1427 | loff_t ret; |
| 1428 | |
Darrick J. Wong | d6ab17f | 2017-07-12 10:26:47 -0700 | [diff] [blame] | 1429 | /* Nothing to be found before or beyond the end of the file. */ |
| 1430 | if (offset < 0 || offset >= size) |
Andreas Gruenbacher | 0ed3b0d | 2017-06-29 11:43:21 -0700 | [diff] [blame] | 1431 | return -ENXIO; |
| 1432 | |
| 1433 | while (length > 0) { |
| 1434 | ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, |
| 1435 | &offset, iomap_seek_data_actor); |
| 1436 | if (ret < 0) |
| 1437 | return ret; |
| 1438 | if (ret == 0) |
| 1439 | break; |
| 1440 | |
| 1441 | offset += ret; |
| 1442 | length -= ret; |
| 1443 | } |
| 1444 | |
| 1445 | if (length <= 0) |
| 1446 | return -ENXIO; |
| 1447 | return offset; |
| 1448 | } |
| 1449 | EXPORT_SYMBOL_GPL(iomap_seek_data); |
| 1450 | |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1451 | /* |
| 1452 | * Private flags for iomap_dio, must not overlap with the public ones in |
| 1453 | * iomap.h: |
| 1454 | */ |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1455 | #define IOMAP_DIO_WRITE_FUA (1 << 28) |
Dave Chinner | 4f8ff44 | 2018-05-02 12:54:52 -0700 | [diff] [blame] | 1456 | #define IOMAP_DIO_NEED_SYNC (1 << 29) |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1457 | #define IOMAP_DIO_WRITE (1 << 30) |
| 1458 | #define IOMAP_DIO_DIRTY (1 << 31) |
| 1459 | |
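|  | /* |
|  | * State for a single direct I/O request.  ->ref starts at one for the |
|  | * submitting context and each submitted bio takes another reference.  Once |
|  | * the last reference goes away the dio is completed; asynchronous writes |
|  | * defer that work to a workqueue because completion may need to block, |
|  | * while reads and synchronous requests complete without it. |
|  | */ |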
| 1460 | struct iomap_dio { |
| 1461 | struct kiocb *iocb; |
| 1462 | iomap_dio_end_io_t *end_io; |
| 1463 | loff_t i_size; |
| 1464 | loff_t size; |
| 1465 | atomic_t ref; |
| 1466 | unsigned flags; |
| 1467 | int error; |
Andreas Gruenbacher | ebf00be | 2018-06-19 15:10:55 -0700 | [diff] [blame] | 1468 | bool wait_for_completion; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1469 | |
| 1470 | union { |
| 1471 | /* used during submission and for synchronous completion: */ |
| 1472 | struct { |
| 1473 | struct iov_iter *iter; |
| 1474 | struct task_struct *waiter; |
| 1475 | struct request_queue *last_queue; |
| 1476 | blk_qc_t cookie; |
| 1477 | } submit; |
| 1478 | |
| 1479 | /* used for aio completion: */ |
| 1480 | struct { |
| 1481 | struct work_struct work; |
| 1482 | } aio; |
| 1483 | }; |
| 1484 | }; |
| 1485 | |
Christoph Hellwig | 81214ba | 2018-12-04 11:12:08 -0700 | [diff] [blame] | 1486 | int iomap_dio_iopoll(struct kiocb *kiocb, bool spin) |
| 1487 | { |
| 1488 | struct request_queue *q = READ_ONCE(kiocb->private); |
| 1489 | |
| 1490 | if (!q) |
| 1491 | return 0; |
| 1492 | return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin); |
| 1493 | } |
| 1494 | EXPORT_SYMBOL_GPL(iomap_dio_iopoll); |
| 1495 | |
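|  | /* |
|  | * Submit one bio of this dio: take a reference for the in-flight bio, mark |
|  | * it for polling if the iocb is HIPRI, and remember the request queue and |
|  | * completion cookie so that iomap_dio_rw() and iomap_dio_iopoll() can poll |
|  | * for completion. |
|  | */ |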
| 1496 | static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap, |
| 1497 | struct bio *bio) |
| 1498 | { |
| 1499 | atomic_inc(&dio->ref); |
| 1500 | |
| 1501 | if (dio->iocb->ki_flags & IOCB_HIPRI) |
| 1502 | bio_set_polled(bio, dio->iocb); |
| 1503 | |
| 1504 | dio->submit.last_queue = bdev_get_queue(iomap->bdev); |
| 1505 | dio->submit.cookie = submit_bio(bio); |
| 1506 | } |
| 1507 | |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1508 | static ssize_t iomap_dio_complete(struct iomap_dio *dio) |
| 1509 | { |
| 1510 | struct kiocb *iocb = dio->iocb; |
Lukas Czerner | 332391a | 2017-09-21 08:16:29 -0600 | [diff] [blame] | 1511 | struct inode *inode = file_inode(iocb->ki_filp); |
Eryu Guan | 5e25c26 | 2017-10-13 09:47:46 -0700 | [diff] [blame] | 1512 | loff_t offset = iocb->ki_pos; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1513 | ssize_t ret; |
| 1514 | |
| 1515 | if (dio->end_io) { |
| 1516 | ret = dio->end_io(iocb, |
| 1517 | dio->error ? dio->error : dio->size, |
| 1518 | dio->flags); |
| 1519 | } else { |
| 1520 | ret = dio->error; |
| 1521 | } |
| 1522 | |
| 1523 | if (likely(!ret)) { |
| 1524 | ret = dio->size; |
| 1525 | /* check for short read */ |
Eryu Guan | 5e25c26 | 2017-10-13 09:47:46 -0700 | [diff] [blame] | 1526 | if (offset + ret > dio->i_size && |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1527 | !(dio->flags & IOMAP_DIO_WRITE)) |
Eryu Guan | 5e25c26 | 2017-10-13 09:47:46 -0700 | [diff] [blame] | 1528 | ret = dio->i_size - offset; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1529 | iocb->ki_pos += ret; |
| 1530 | } |
| 1531 | |
Eryu Guan | 5e25c26 | 2017-10-13 09:47:46 -0700 | [diff] [blame] | 1532 | /* |
| 1533 | * Try again to invalidate clean pages which might have been cached by |
| 1534 | * non-direct readahead, or faulted in by get_user_pages() if the source |
| 1535 | * of the write was an mmap'ed region of the file we're writing. Either |
| 1536 | * one is a pretty crazy thing to do, so we don't support it 100%. If |
| 1537 | * this invalidation fails, tough, the write still worked... |
| 1538 | * |
| 1539 | * And this page cache invalidation has to be after dio->end_io(), as |
| 1540 | * some filesystems convert unwritten extents to real allocations in |
| 1541 | * end_io() when necessary, otherwise a racing buffer read would cache |
| 1542 | * zeros from unwritten extents. |
| 1543 | */ |
| 1544 | if (!dio->error && |
| 1545 | (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { |
| 1546 | int err; |
| 1547 | err = invalidate_inode_pages2_range(inode->i_mapping, |
| 1548 | offset >> PAGE_SHIFT, |
| 1549 | (offset + dio->size - 1) >> PAGE_SHIFT); |
Darrick J. Wong | 5a9d929 | 2018-01-08 10:41:39 -0800 | [diff] [blame] | 1550 | if (err) |
| 1551 | dio_warn_stale_pagecache(iocb->ki_filp); |
Eryu Guan | 5e25c26 | 2017-10-13 09:47:46 -0700 | [diff] [blame] | 1552 | } |
| 1553 | |
Dave Chinner | 4f8ff44 | 2018-05-02 12:54:52 -0700 | [diff] [blame] | 1554 | /* |
| 1555 | * If this is a DSYNC write, make sure we push it to stable storage now |
| 1556 | * that we've written data. |
| 1557 | */ |
| 1558 | if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC)) |
| 1559 | ret = generic_write_sync(iocb, ret); |
| 1560 | |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1561 | inode_dio_end(file_inode(iocb->ki_filp)); |
| 1562 | kfree(dio); |
| 1563 | |
| 1564 | return ret; |
| 1565 | } |
| 1566 | |
| 1567 | static void iomap_dio_complete_work(struct work_struct *work) |
| 1568 | { |
| 1569 | struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work); |
| 1570 | struct kiocb *iocb = dio->iocb; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1571 | |
Dave Chinner | 4f8ff44 | 2018-05-02 12:54:52 -0700 | [diff] [blame] | 1572 | iocb->ki_complete(iocb, iomap_dio_complete(dio), 0); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1573 | } |
| 1574 | |
| 1575 | /* |
| 1576 | * Set an error in the dio if none is set yet. We have to use cmpxchg |
| 1577 | * as the submission context and the completion context(s) can race to |
| 1578 | * update the error. |
| 1579 | */ |
| 1580 | static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret) |
| 1581 | { |
| 1582 | cmpxchg(&dio->error, 0, ret); |
| 1583 | } |
| 1584 | |
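|  | /* |
|  | * Per-bio completion handler: record any error, drop the bio's reference on |
|  | * the dio (see the comment above struct iomap_dio for how the final drop |
|  | * completes the request), and release the bio; pages of user-backed reads |
|  | * are handed to bio_check_pages_dirty() so their dirty state is preserved. |
|  | */ |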
| 1585 | static void iomap_dio_bio_end_io(struct bio *bio) |
| 1586 | { |
| 1587 | struct iomap_dio *dio = bio->bi_private; |
| 1588 | bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); |
| 1589 | |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1590 | if (bio->bi_status) |
| 1591 | iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1592 | |
| 1593 | if (atomic_dec_and_test(&dio->ref)) { |
Andreas Gruenbacher | ebf00be | 2018-06-19 15:10:55 -0700 | [diff] [blame] | 1594 | if (dio->wait_for_completion) { |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1595 | struct task_struct *waiter = dio->submit.waiter; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1596 | WRITE_ONCE(dio->submit.waiter, NULL); |
Jens Axboe | 0619317 | 2018-11-13 21:16:54 -0700 | [diff] [blame] | 1597 | blk_wake_io_task(waiter); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1598 | } else if (dio->flags & IOMAP_DIO_WRITE) { |
| 1599 | struct inode *inode = file_inode(dio->iocb->ki_filp); |
| 1600 | |
| 1601 | INIT_WORK(&dio->aio.work, iomap_dio_complete_work); |
| 1602 | queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); |
| 1603 | } else { |
| 1604 | iomap_dio_complete_work(&dio->aio.work); |
| 1605 | } |
| 1606 | } |
| 1607 | |
| 1608 | if (should_dirty) { |
| 1609 | bio_check_pages_dirty(bio); |
| 1610 | } else { |
Jens Axboe | 399254a | 2019-02-27 13:13:23 -0700 | [diff] [blame] | 1611 | if (!bio_flagged(bio, BIO_NO_PAGE_REF)) { |
| 1612 | struct bvec_iter_all iter_all; |
| 1613 | struct bio_vec *bvec; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1614 | |
Christoph Hellwig | 2b070cf | 2019-04-25 09:03:00 +0200 | [diff] [blame] | 1615 | bio_for_each_segment_all(bvec, bio, iter_all) |
Jens Axboe | 399254a | 2019-02-27 13:13:23 -0700 | [diff] [blame] | 1616 | put_page(bvec->bv_page); |
| 1617 | } |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1618 | bio_put(bio); |
| 1619 | } |
| 1620 | } |
| 1621 | |
Christoph Hellwig | 81214ba | 2018-12-04 11:12:08 -0700 | [diff] [blame] | 1622 | static void |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1623 | iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, |
| 1624 | unsigned len) |
| 1625 | { |
| 1626 | struct page *page = ZERO_PAGE(0); |
Jens Axboe | d1e3628 | 2018-08-29 10:36:56 -0600 | [diff] [blame] | 1627 | int flags = REQ_SYNC | REQ_IDLE; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1628 | struct bio *bio; |
| 1629 | |
| 1630 | bio = bio_alloc(GFP_KERNEL, 1); |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1631 | bio_set_dev(bio, iomap->bdev); |
Christoph Hellwig | 57fc505 | 2018-06-01 09:03:08 -0700 | [diff] [blame] | 1632 | bio->bi_iter.bi_sector = iomap_sector(iomap, pos); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1633 | bio->bi_private = dio; |
| 1634 | bio->bi_end_io = iomap_dio_bio_end_io; |
| 1635 | |
| 1636 | get_page(page); |
Christoph Hellwig | 6533b4e | 2018-06-01 09:03:07 -0700 | [diff] [blame] | 1637 | __bio_add_page(bio, page, len, 0); |
Jens Axboe | d1e3628 | 2018-08-29 10:36:56 -0600 | [diff] [blame] | 1638 | bio_set_op_attrs(bio, REQ_OP_WRITE, flags); |
Christoph Hellwig | 81214ba | 2018-12-04 11:12:08 -0700 | [diff] [blame] | 1639 | iomap_dio_submit_bio(dio, iomap, bio); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1640 | } |
| 1641 | |
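|  | /* |
|  | * Carve the current extent into bios and submit them.  Newly allocated and |
|  | * unwritten extents get their sub-block head and tail zeroed so a short or |
|  | * unaligned write cannot expose stale block contents, and qualifying pure |
|  | * data writes are issued with REQ_FUA to avoid a cache flush at completion. |
|  | */ |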
| 1642 | static loff_t |
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1643 | iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, |
| 1644 | struct iomap_dio *dio, struct iomap *iomap) |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1645 | { |
Fabian Frederick | 9340747 | 2017-02-27 14:28:32 -0800 | [diff] [blame] | 1646 | unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); |
| 1647 | unsigned int fs_block_size = i_blocksize(inode), pad; |
| 1648 | unsigned int align = iov_iter_alignment(dio->submit.iter); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1649 | struct iov_iter iter; |
| 1650 | struct bio *bio; |
| 1651 | bool need_zeroout = false; |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1652 | bool use_fua = false; |
Dave Chinner | 4721a60 | 2018-11-19 13:31:11 -0800 | [diff] [blame] | 1653 | int nr_pages, ret = 0; |
Al Viro | cfe057f | 2017-09-11 21:17:09 +0100 | [diff] [blame] | 1654 | size_t copied = 0; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1655 | |
| 1656 | if ((pos | length | align) & ((1 << blkbits) - 1)) |
| 1657 | return -EINVAL; |
| 1658 | |
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1659 | if (iomap->type == IOMAP_UNWRITTEN) { |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1660 | dio->flags |= IOMAP_DIO_UNWRITTEN; |
| 1661 | need_zeroout = true; |
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1662 | } |
| 1663 | |
| 1664 | if (iomap->flags & IOMAP_F_SHARED) |
| 1665 | dio->flags |= IOMAP_DIO_COW; |
| 1666 | |
| 1667 | if (iomap->flags & IOMAP_F_NEW) { |
| 1668 | need_zeroout = true; |
Dave Chinner | 0929d85 | 2018-11-19 13:31:10 -0800 | [diff] [blame] | 1669 | } else if (iomap->type == IOMAP_MAPPED) { |
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1670 | /* |
Dave Chinner | 0929d85 | 2018-11-19 13:31:10 -0800 | [diff] [blame] | 1671 | * Use a FUA write if we need datasync semantics, this is a pure |
| 1672 | * data IO that doesn't require any metadata updates (including |
| 1673 | * after IO completion such as unwritten extent conversion), and |
| 1674 | * the underlying device supports FUA. This allows us to avoid |
| 1675 | * cache flushes on IO completion. |
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1676 | */ |
| 1677 | if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && |
| 1678 | (dio->flags & IOMAP_DIO_WRITE_FUA) && |
| 1679 | blk_queue_fua(bdev_get_queue(iomap->bdev))) |
| 1680 | use_fua = true; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1681 | } |
| 1682 | |
| 1683 | /* |
| 1684 | * Operate on a partial iter trimmed to the extent we were called for. |
| 1685 | * We'll update the iter in the dio once we're done with this extent. |
| 1686 | */ |
| 1687 | iter = *dio->submit.iter; |
| 1688 | iov_iter_truncate(&iter, length); |
| 1689 | |
| 1690 | nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES); |
| 1691 | if (nr_pages <= 0) |
| 1692 | return nr_pages; |
| 1693 | |
| 1694 | if (need_zeroout) { |
| 1695 | /* zero out from the start of the block to the write offset */ |
| 1696 | pad = pos & (fs_block_size - 1); |
| 1697 | if (pad) |
| 1698 | iomap_dio_zero(dio, iomap, pos - pad, pad); |
| 1699 | } |
| 1700 | |
| 1701 | do { |
Al Viro | cfe057f | 2017-09-11 21:17:09 +0100 | [diff] [blame] | 1702 | size_t n; |
| 1703 | if (dio->error) { |
| 1704 | iov_iter_revert(dio->submit.iter, copied); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1705 | return 0; |
Al Viro | cfe057f | 2017-09-11 21:17:09 +0100 | [diff] [blame] | 1706 | } |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1707 | |
| 1708 | bio = bio_alloc(GFP_KERNEL, nr_pages); |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1709 | bio_set_dev(bio, iomap->bdev); |
Christoph Hellwig | 57fc505 | 2018-06-01 09:03:08 -0700 | [diff] [blame] | 1710 | bio->bi_iter.bi_sector = iomap_sector(iomap, pos); |
Jens Axboe | 45d06cf | 2017-06-27 11:01:22 -0600 | [diff] [blame] | 1711 | bio->bi_write_hint = dio->iocb->ki_hint; |
Adam Manzanares | 087e566 | 2018-05-22 10:52:21 -0700 | [diff] [blame] | 1712 | bio->bi_ioprio = dio->iocb->ki_ioprio; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1713 | bio->bi_private = dio; |
| 1714 | bio->bi_end_io = iomap_dio_bio_end_io; |
| 1715 | |
| 1716 | ret = bio_iov_iter_get_pages(bio, &iter); |
| 1717 | if (unlikely(ret)) { |
Dave Chinner | 4721a60 | 2018-11-19 13:31:11 -0800 | [diff] [blame] | 1718 | /* |
| 1719 | * We have to stop part way through an IO. We must fall |
| 1720 | * through to the sub-block tail zeroing here, otherwise |
| 1721 | * this short IO may expose stale data in the tail of |
| 1722 | * the block we haven't written data to. |
| 1723 | */ |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1724 | bio_put(bio); |
Dave Chinner | 4721a60 | 2018-11-19 13:31:11 -0800 | [diff] [blame] | 1725 | goto zero_tail; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1726 | } |
| 1727 | |
Al Viro | cfe057f | 2017-09-11 21:17:09 +0100 | [diff] [blame] | 1728 | n = bio->bi_iter.bi_size; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1729 | if (dio->flags & IOMAP_DIO_WRITE) { |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1730 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; |
| 1731 | if (use_fua) |
| 1732 | bio->bi_opf |= REQ_FUA; |
| 1733 | else |
| 1734 | dio->flags &= ~IOMAP_DIO_WRITE_FUA; |
Al Viro | cfe057f | 2017-09-11 21:17:09 +0100 | [diff] [blame] | 1735 | task_io_account_write(n); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1736 | } else { |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1737 | bio->bi_opf = REQ_OP_READ; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1738 | if (dio->flags & IOMAP_DIO_DIRTY) |
| 1739 | bio_set_pages_dirty(bio); |
| 1740 | } |
| 1741 | |
Al Viro | cfe057f | 2017-09-11 21:17:09 +0100 | [diff] [blame] | 1742 | iov_iter_advance(dio->submit.iter, n); |
| 1743 | |
| 1744 | dio->size += n; |
| 1745 | pos += n; |
| 1746 | copied += n; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1747 | |
| 1748 | nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES); |
Christoph Hellwig | 81214ba | 2018-12-04 11:12:08 -0700 | [diff] [blame] | 1749 | iomap_dio_submit_bio(dio, iomap, bio); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1750 | } while (nr_pages); |
| 1751 | |
Dave Chinner | b450672 | 2018-11-19 13:31:10 -0800 | [diff] [blame] | 1752 | /* |
| 1753 | * We need to zeroout the tail of a sub-block write if the extent type |
| 1754 | * requires zeroing or the write extends beyond EOF. If we don't zero |
| 1755 | * the block tail in the latter case, we can expose stale data via mmap |
| 1756 | * reads of the EOF block. |
| 1757 | */ |
Dave Chinner | 4721a60 | 2018-11-19 13:31:11 -0800 | [diff] [blame] | 1758 | zero_tail: |
Dave Chinner | b450672 | 2018-11-19 13:31:10 -0800 | [diff] [blame] | 1759 | if (need_zeroout || |
| 1760 | ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) { |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1761 | /* zero out from the end of the write to the end of the block */ |
| 1762 | pad = pos & (fs_block_size - 1); |
| 1763 | if (pad) |
| 1764 | iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); |
| 1765 | } |
Dave Chinner | 4721a60 | 2018-11-19 13:31:11 -0800 | [diff] [blame] | 1766 | return copied ? copied : ret; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1767 | } |
| 1768 | |
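|  | /* Reads from a hole just zero-fill the user buffer; no bio is needed. */ |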
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1769 | static loff_t |
| 1770 | iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio) |
| 1771 | { |
| 1772 | length = iov_iter_zero(length, dio->submit.iter); |
| 1773 | dio->size += length; |
| 1774 | return length; |
| 1775 | } |
| 1776 | |
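|  | /* |
|  | * Direct I/O against an inline extent is a plain memory copy to or from the |
|  | * in-core inline data.  Writes zero any gap between the old size and the |
|  | * write offset, then update i_size and mark the inode dirty so the change |
|  | * reaches disk through the normal metadata paths. |
|  | */ |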
| 1777 | static loff_t |
Andreas Gruenbacher | ec181f6 | 2018-07-03 09:07:47 -0700 | [diff] [blame] | 1778 | iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length, |
| 1779 | struct iomap_dio *dio, struct iomap *iomap) |
| 1780 | { |
| 1781 | struct iov_iter *iter = dio->submit.iter; |
| 1782 | size_t copied; |
| 1783 | |
| 1784 | BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data)); |
| 1785 | |
| 1786 | if (dio->flags & IOMAP_DIO_WRITE) { |
| 1787 | loff_t size = inode->i_size; |
| 1788 | |
| 1789 | if (pos > size) |
| 1790 | memset(iomap->inline_data + size, 0, pos - size); |
| 1791 | copied = copy_from_iter(iomap->inline_data + pos, length, iter); |
| 1792 | if (copied) { |
| 1793 | if (pos + copied > size) |
| 1794 | i_size_write(inode, pos + copied); |
| 1795 | mark_inode_dirty(inode); |
| 1796 | } |
| 1797 | } else { |
| 1798 | copied = copy_to_iter(iomap->inline_data + pos, length, iter); |
| 1799 | } |
| 1800 | dio->size += copied; |
| 1801 | return copied; |
| 1802 | } |
| 1803 | |
| 1804 | static loff_t |
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1805 | iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, |
| 1806 | void *data, struct iomap *iomap) |
| 1807 | { |
| 1808 | struct iomap_dio *dio = data; |
| 1809 | |
| 1810 | switch (iomap->type) { |
| 1811 | case IOMAP_HOLE: |
| 1812 | if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE)) |
| 1813 | return -EIO; |
| 1814 | return iomap_dio_hole_actor(length, dio); |
| 1815 | case IOMAP_UNWRITTEN: |
| 1816 | if (!(dio->flags & IOMAP_DIO_WRITE)) |
| 1817 | return iomap_dio_hole_actor(length, dio); |
| 1818 | return iomap_dio_bio_actor(inode, pos, length, dio, iomap); |
| 1819 | case IOMAP_MAPPED: |
| 1820 | return iomap_dio_bio_actor(inode, pos, length, dio, iomap); |
Andreas Gruenbacher | ec181f6 | 2018-07-03 09:07:47 -0700 | [diff] [blame] | 1821 | case IOMAP_INLINE: |
| 1822 | return iomap_dio_inline_actor(inode, pos, length, dio, iomap); |
Christoph Hellwig | 0923043 | 2018-07-03 09:07:46 -0700 | [diff] [blame] | 1823 | default: |
| 1824 | WARN_ON_ONCE(1); |
| 1825 | return -EIO; |
| 1826 | } |
| 1827 | } |
| 1828 | |
Dave Chinner | 4f8ff44 | 2018-05-02 12:54:52 -0700 | [diff] [blame] | 1829 | /* |
| 1830 | * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1831 | * is being issued as AIO or not. This allows us to optimise pure data writes |
| 1832 | * to use REQ_FUA rather than requiring generic_write_sync() to issue a |
| 1833 | * REQ_FLUSH post write. This is slightly tricky because a single request here |
| 1834 | * can be mapped into multiple disjoint IOs and only a subset of the IOs issued |
| 1835 | * may be pure data writes. In that case, we still need to do a full data sync |
| 1836 | * completion. |
Dave Chinner | 4f8ff44 | 2018-05-02 12:54:52 -0700 | [diff] [blame] | 1837 | */ |
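|  | /* |
|  | * Callers must hold the inode's i_rwsem (see the lockdep assertion below). |
|  | * A minimal usage sketch, with purely illustrative myfs_* names and the |
|  | * end_io callback left optional (it may be NULL): |
|  | * |
|  | *	static ssize_t myfs_dio_write(struct kiocb *iocb, struct iov_iter *from) |
|  | *	{ |
|  | *		struct inode *inode = file_inode(iocb->ki_filp); |
|  | *		ssize_t ret; |
|  | * |
|  | *		inode_lock(inode); |
|  | *		ret = iomap_dio_rw(iocb, from, &myfs_iomap_ops, |
|  | *				myfs_dio_write_end_io); |
|  | *		inode_unlock(inode); |
|  | *		return ret; |
|  | *	} |
|  | */ |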
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1838 | ssize_t |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 1839 | iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, |
| 1840 | const struct iomap_ops *ops, iomap_dio_end_io_t end_io) |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1841 | { |
| 1842 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
| 1843 | struct inode *inode = file_inode(iocb->ki_filp); |
| 1844 | size_t count = iov_iter_count(iter); |
Eryu Guan | c771c14 | 2017-03-02 15:02:06 -0800 | [diff] [blame] | 1845 | loff_t pos = iocb->ki_pos, start = pos; |
| 1846 | loff_t end = iocb->ki_pos + count - 1, ret = 0; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1847 | unsigned int flags = IOMAP_DIRECT; |
Christoph Hellwig | 4ea899e | 2019-01-17 08:58:58 -0800 | [diff] [blame] | 1848 | bool wait_for_completion = is_sync_kiocb(iocb); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1849 | struct blk_plug plug; |
| 1850 | struct iomap_dio *dio; |
| 1851 | |
| 1852 | lockdep_assert_held(&inode->i_rwsem); |
| 1853 | |
| 1854 | if (!count) |
| 1855 | return 0; |
| 1856 | |
| 1857 | dio = kmalloc(sizeof(*dio), GFP_KERNEL); |
| 1858 | if (!dio) |
| 1859 | return -ENOMEM; |
| 1860 | |
| 1861 | dio->iocb = iocb; |
| 1862 | atomic_set(&dio->ref, 1); |
| 1863 | dio->size = 0; |
| 1864 | dio->i_size = i_size_read(inode); |
| 1865 | dio->end_io = end_io; |
| 1866 | dio->error = 0; |
| 1867 | dio->flags = 0; |
| 1868 | |
| 1869 | dio->submit.iter = iter; |
Andreas Gruenbacher | ebf00be | 2018-06-19 15:10:55 -0700 | [diff] [blame] | 1870 | dio->submit.waiter = current; |
| 1871 | dio->submit.cookie = BLK_QC_T_NONE; |
| 1872 | dio->submit.last_queue = NULL; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1873 | |
| 1874 | if (iov_iter_rw(iter) == READ) { |
| 1875 | if (pos >= dio->i_size) |
| 1876 | goto out_free_dio; |
| 1877 | |
David Howells | 00e2370 | 2018-10-22 13:07:28 +0100 | [diff] [blame] | 1878 | if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ) |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1879 | dio->flags |= IOMAP_DIO_DIRTY; |
| 1880 | } else { |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1881 | flags |= IOMAP_WRITE; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1882 | dio->flags |= IOMAP_DIO_WRITE; |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1883 | |
| 1884 | /* for data sync or sync, we need sync completion processing */ |
Dave Chinner | 4f8ff44 | 2018-05-02 12:54:52 -0700 | [diff] [blame] | 1885 | if (iocb->ki_flags & IOCB_DSYNC) |
| 1886 | dio->flags |= IOMAP_DIO_NEED_SYNC; |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1887 | |
| 1888 | /* |
| 1889 | * For datasync only writes, we optimistically try using FUA for |
| 1890 | * this IO. Any non-FUA write that occurs will clear this flag, |
| 1891 | * hence we know before completion whether a cache flush is |
| 1892 | * necessary. |
| 1893 | */ |
| 1894 | if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC) |
| 1895 | dio->flags |= IOMAP_DIO_WRITE_FUA; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1896 | } |
| 1897 | |
Goldwyn Rodrigues | a38d124 | 2017-06-20 07:05:45 -0500 | [diff] [blame] | 1898 | if (iocb->ki_flags & IOCB_NOWAIT) { |
| 1899 | if (filemap_range_has_page(mapping, start, end)) { |
| 1900 | ret = -EAGAIN; |
| 1901 | goto out_free_dio; |
| 1902 | } |
| 1903 | flags |= IOMAP_NOWAIT; |
| 1904 | } |
| 1905 | |
Andrey Ryabinin | 55635ba | 2017-05-03 14:55:59 -0700 | [diff] [blame] | 1906 | ret = filemap_write_and_wait_range(mapping, start, end); |
| 1907 | if (ret) |
| 1908 | goto out_free_dio; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1909 | |
Darrick J. Wong | 5a9d929 | 2018-01-08 10:41:39 -0800 | [diff] [blame] | 1910 | /* |
| 1911 | * Try to invalidate cache pages for the range we're direct |
| 1912 | * writing. If this invalidation fails, tough, the write will |
| 1913 | * still work, but racing two incompatible write paths is a |
| 1914 | * pretty crazy thing to do, so we don't support it 100%. |
| 1915 | */ |
Andrey Ryabinin | 55635ba | 2017-05-03 14:55:59 -0700 | [diff] [blame] | 1916 | ret = invalidate_inode_pages2_range(mapping, |
| 1917 | start >> PAGE_SHIFT, end >> PAGE_SHIFT); |
Darrick J. Wong | 5a9d929 | 2018-01-08 10:41:39 -0800 | [diff] [blame] | 1918 | if (ret) |
| 1919 | dio_warn_stale_pagecache(iocb->ki_filp); |
Andrey Ryabinin | 55635ba | 2017-05-03 14:55:59 -0700 | [diff] [blame] | 1920 | ret = 0; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1921 | |
Christoph Hellwig | 4ea899e | 2019-01-17 08:58:58 -0800 | [diff] [blame] | 1922 | if (iov_iter_rw(iter) == WRITE && !wait_for_completion && |
Chandan Rajendra | 546e7be | 2017-09-22 11:47:33 -0700 | [diff] [blame] | 1923 | !inode->i_sb->s_dio_done_wq) { |
| 1924 | ret = sb_init_dio_done_wq(inode->i_sb); |
| 1925 | if (ret < 0) |
| 1926 | goto out_free_dio; |
| 1927 | } |
| 1928 | |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1929 | inode_dio_begin(inode); |
| 1930 | |
| 1931 | blk_start_plug(&plug); |
| 1932 | do { |
| 1933 | ret = iomap_apply(inode, pos, count, flags, ops, dio, |
| 1934 | iomap_dio_actor); |
| 1935 | if (ret <= 0) { |
| 1936 | /* magic error code to fall back to buffered I/O */ |
Andreas Gruenbacher | ebf00be | 2018-06-19 15:10:55 -0700 | [diff] [blame] | 1937 | if (ret == -ENOTBLK) { |
Christoph Hellwig | 4ea899e | 2019-01-17 08:58:58 -0800 | [diff] [blame] | 1938 | wait_for_completion = true; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1939 | ret = 0; |
Andreas Gruenbacher | ebf00be | 2018-06-19 15:10:55 -0700 | [diff] [blame] | 1940 | } |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1941 | break; |
| 1942 | } |
| 1943 | pos += ret; |
Chandan Rajendra | a008c31 | 2017-04-12 11:03:20 -0700 | [diff] [blame] | 1944 | |
| 1945 | if (iov_iter_rw(iter) == READ && pos >= dio->i_size) |
| 1946 | break; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1947 | } while ((count = iov_iter_count(iter)) > 0); |
| 1948 | blk_finish_plug(&plug); |
| 1949 | |
| 1950 | if (ret < 0) |
| 1951 | iomap_dio_set_error(dio, ret); |
| 1952 | |
Dave Chinner | 3460cac | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1953 | /* |
| 1954 | * If all the writes we issued were FUA, we don't need to flush the |
| 1955 | * cache on IO completion. Clear the sync flag for this case. |
| 1956 | */ |
| 1957 | if (dio->flags & IOMAP_DIO_WRITE_FUA) |
| 1958 | dio->flags &= ~IOMAP_DIO_NEED_SYNC; |
| 1959 | |
Christoph Hellwig | 81214ba | 2018-12-04 11:12:08 -0700 | [diff] [blame] | 1960 | WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie); |
| 1961 | WRITE_ONCE(iocb->private, dio->submit.last_queue); |
| 1962 | |
Christoph Hellwig | 4ea899e | 2019-01-17 08:58:58 -0800 | [diff] [blame] | 1963 | /* |
| 1964 | * We are about to drop our additional submission reference, which |
| 1965 | * might be the last reference to the dio. There are three three |
| 1966 | * different ways we can progress here: |
| 1967 | * |
| 1968 | * (a) If this is the last reference we will always complete and free |
| 1969 | * the dio ourselves. |
| 1970 | * (b) If this is not the last reference, and we serve an asynchronous |
| 1971 | * iocb, we must never touch the dio after the decrement; the |
| 1972 | * I/O completion handler will complete and free it. |
| 1973 | * (c) If this is not the last reference, but we serve a synchronous |
| 1974 | * iocb, the I/O completion handler will wake us up on the drop |
| 1975 | * of the final reference, and we will complete and free it here |
| 1976 | * after being woken by the I/O completion handler. |
| 1977 | */ |
| 1978 | dio->wait_for_completion = wait_for_completion; |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1979 | if (!atomic_dec_and_test(&dio->ref)) { |
Christoph Hellwig | 4ea899e | 2019-01-17 08:58:58 -0800 | [diff] [blame] | 1980 | if (!wait_for_completion) |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1981 | return -EIOCBQUEUED; |
| 1982 | |
| 1983 | for (;;) { |
Linus Torvalds | 1ac5cd4 | 2019-01-02 10:46:03 -0800 | [diff] [blame] | 1984 | set_current_state(TASK_UNINTERRUPTIBLE); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1985 | if (!READ_ONCE(dio->submit.waiter)) |
| 1986 | break; |
| 1987 | |
| 1988 | if (!(iocb->ki_flags & IOCB_HIPRI) || |
| 1989 | !dio->submit.last_queue || |
Christoph Hellwig | ea435e1 | 2017-11-02 21:29:54 +0300 | [diff] [blame] | 1990 | !blk_poll(dio->submit.last_queue, |
Jens Axboe | 0a1b8b8 | 2018-11-26 08:24:43 -0700 | [diff] [blame] | 1991 | dio->submit.cookie, true)) |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1992 | io_schedule(); |
| 1993 | } |
| 1994 | __set_current_state(TASK_RUNNING); |
| 1995 | } |
| 1996 | |
Christoph Hellwig | 4ea899e | 2019-01-17 08:58:58 -0800 | [diff] [blame] | 1997 | return iomap_dio_complete(dio); |
Christoph Hellwig | ff6a929 | 2016-11-30 14:36:01 +1100 | [diff] [blame] | 1998 | |
| 1999 | out_free_dio: |
| 2000 | kfree(dio); |
| 2001 | return ret; |
| 2002 | } |
| 2003 | EXPORT_SYMBOL_GPL(iomap_dio_rw); |
Darrick J. Wong | 6748212 | 2018-05-10 08:38:15 -0700 | [diff] [blame] | 2004 | |
| 2005 | /* Swapfile activation */ |
| 2006 | |
| 2007 | #ifdef CONFIG_SWAP |
| 2008 | struct iomap_swapfile_info { |
| 2009 | struct iomap iomap; /* accumulated iomap */ |
| 2010 | struct swap_info_struct *sis; |
| 2011 | uint64_t lowest_ppage; /* lowest physical addr seen (pages) */ |
| 2012 | uint64_t highest_ppage; /* highest physical addr seen (pages) */ |
| 2013 | unsigned long nr_pages; /* number of pages collected */ |
| 2014 | int nr_extents; /* extent count */ |
| 2015 | }; |
| 2016 | |
| 2017 | /* |
| 2018 | * Collect physical extents for this swap file. Physical extents reported to |
| 2019 | * the swap code must be trimmed to align to a page boundary. The logical |
| 2020 | * offset within the file is irrelevant since the swapfile code maps logical |
| 2021 | * page numbers of the swap device to the physical page-aligned extents. |
| 2022 | */ |
| 2023 | static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi) |
| 2024 | { |
| 2025 | struct iomap *iomap = &isi->iomap; |
| 2026 | unsigned long nr_pages; |
| 2027 | uint64_t first_ppage; |
| 2028 | uint64_t first_ppage_reported; |
| 2029 | uint64_t next_ppage; |
| 2030 | int error; |
| 2031 | |
| 2032 | /* |
| 2033 | * Round the start up and the end down so that the physical |
| 2034 | * extent aligns to a page boundary. |
| 2035 | */ |
| 2036 | first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT; |
| 2037 | next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >> |
| 2038 | PAGE_SHIFT; |
| 2039 | |
| 2040 | /* Skip too-short physical extents. */ |
| 2041 | if (first_ppage >= next_ppage) |
| 2042 | return 0; |
| 2043 | nr_pages = next_ppage - first_ppage; |
| 2044 | |
| 2045 | /* |
| 2046 | * Calculate how much swap space we're adding; the first page contains |
| 2047 | * the swap header and doesn't count. The mm still wants that first |
| 2048 | * page fed to add_swap_extent, however. |
| 2049 | */ |
| 2050 | first_ppage_reported = first_ppage; |
| 2051 | if (iomap->offset == 0) |
| 2052 | first_ppage_reported++; |
| 2053 | if (isi->lowest_ppage > first_ppage_reported) |
| 2054 | isi->lowest_ppage = first_ppage_reported; |
| 2055 | if (isi->highest_ppage < (next_ppage - 1)) |
| 2056 | isi->highest_ppage = next_ppage - 1; |
| 2057 | |
| 2058 | /* Add extent, set up for the next call. */ |
| 2059 | error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); |
| 2060 | if (error < 0) |
| 2061 | return error; |
| 2062 | isi->nr_extents += error; |
| 2063 | isi->nr_pages += nr_pages; |
| 2064 | return 0; |
| 2065 | } |
| 2066 | |
| 2067 | /* |
| 2068 | * Accumulate iomaps for this swap file. We have to accumulate iomaps because |
| 2069 | * swap only cares about contiguous page-aligned physical extents and makes no |
| 2070 | * distinction between written and unwritten extents. |
| 2071 | */ |
| 2072 | static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos, |
| 2073 | loff_t count, void *data, struct iomap *iomap) |
| 2074 | { |
| 2075 | struct iomap_swapfile_info *isi = data; |
| 2076 | int error; |
| 2077 | |
Christoph Hellwig | 19319b5 | 2018-06-01 09:03:06 -0700 | [diff] [blame] | 2078 | switch (iomap->type) { |
| 2079 | case IOMAP_MAPPED: |
| 2080 | case IOMAP_UNWRITTEN: |
| 2081 | /* Only real or unwritten extents. */ |
| 2082 | break; |
| 2083 | case IOMAP_INLINE: |
| 2084 | /* No inline data. */ |
Omar Sandoval | ec60192 | 2018-05-16 11:13:34 -0700 | [diff] [blame] | 2085 | pr_err("swapon: file is inline\n"); |
| 2086 | return -EINVAL; |
Christoph Hellwig | 19319b5 | 2018-06-01 09:03:06 -0700 | [diff] [blame] | 2087 | default: |
Omar Sandoval | ec60192 | 2018-05-16 11:13:34 -0700 | [diff] [blame] | 2088 | pr_err("swapon: file has unallocated extents\n"); |
| 2089 | return -EINVAL; |
| 2090 | } |
Darrick J. Wong | 6748212 | 2018-05-10 08:38:15 -0700 | [diff] [blame] | 2091 | |
Omar Sandoval | ec60192 | 2018-05-16 11:13:34 -0700 | [diff] [blame] | 2092 | /* No uncommitted metadata or shared blocks. */ |
| 2093 | if (iomap->flags & IOMAP_F_DIRTY) { |
| 2094 | pr_err("swapon: file is not committed\n"); |
| 2095 | return -EINVAL; |
| 2096 | } |
| 2097 | if (iomap->flags & IOMAP_F_SHARED) { |
| 2098 | pr_err("swapon: file has shared extents\n"); |
| 2099 | return -EINVAL; |
| 2100 | } |
Darrick J. Wong | 6748212 | 2018-05-10 08:38:15 -0700 | [diff] [blame] | 2101 | |
Omar Sandoval | ec60192 | 2018-05-16 11:13:34 -0700 | [diff] [blame] | 2102 | /* Only one bdev per swap file. */ |
| 2103 | if (iomap->bdev != isi->sis->bdev) { |
| 2104 | pr_err("swapon: file is on multiple devices\n"); |
| 2105 | return -EINVAL; |
| 2106 | } |
Darrick J. Wong | 6748212 | 2018-05-10 08:38:15 -0700 | [diff] [blame] | 2107 | |
| 2108 | if (isi->iomap.length == 0) { |
| 2109 | /* No accumulated extent, so just store it. */ |
| 2110 | memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); |
| 2111 | } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) { |
| 2112 | /* Append this to the accumulated extent. */ |
| 2113 | isi->iomap.length += iomap->length; |
| 2114 | } else { |
| 2115 | /* Otherwise, add the retained iomap and store this one. */ |
| 2116 | error = iomap_swapfile_add_extent(isi); |
| 2117 | if (error) |
| 2118 | return error; |
| 2119 | memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); |
| 2120 | } |
Darrick J. Wong | 6748212 | 2018-05-10 08:38:15 -0700 | [diff] [blame] | 2121 | return count; |
Darrick J. Wong | 6748212 | 2018-05-10 08:38:15 -0700 | [diff] [blame] | 2122 | } |
| 2123 | |
| 2124 | /* |
| 2125 | * Iterate a swap file's iomaps to construct physical extents that can be |
| 2126 | * passed to the swapfile subsystem. |
| 2127 | */ |
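|  | /* |
|  | * On success the number of extents added is returned, sis->max, sis->pages |
|  | * and sis->highest_bit are derived from the collected pages, and *pagespan |
|  | * covers the physical page range occupied by the swap file. |
|  | */ |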
| 2128 | int iomap_swapfile_activate(struct swap_info_struct *sis, |
| 2129 | struct file *swap_file, sector_t *pagespan, |
| 2130 | const struct iomap_ops *ops) |
| 2131 | { |
| 2132 | struct iomap_swapfile_info isi = { |
| 2133 | .sis = sis, |
| 2134 | .lowest_ppage = (sector_t)-1ULL, |
| 2135 | }; |
| 2136 | struct address_space *mapping = swap_file->f_mapping; |
| 2137 | struct inode *inode = mapping->host; |
| 2138 | loff_t pos = 0; |
| 2139 | loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE); |
| 2140 | loff_t ret; |
| 2141 | |
Darrick J. Wong | 117a148 | 2018-06-05 09:53:05 -0700 | [diff] [blame] | 2142 | /* |
| 2143 | * Persist all file mapping metadata so that we won't have any |
| 2144 | * IOMAP_F_DIRTY iomaps. |
| 2145 | */ |
| 2146 | ret = vfs_fsync(swap_file, 1); |
Darrick J. Wong | 6748212 | 2018-05-10 08:38:15 -0700 | [diff] [blame] | 2147 | if (ret) |
| 2148 | return ret; |
| 2149 | |
| 2150 | while (len > 0) { |
| 2151 | ret = iomap_apply(inode, pos, len, IOMAP_REPORT, |
| 2152 | ops, &isi, iomap_swapfile_activate_actor); |
| 2153 | if (ret <= 0) |
| 2154 | return ret; |
| 2155 | |
| 2156 | pos += ret; |
| 2157 | len -= ret; |
| 2158 | } |
| 2159 | |
| 2160 | if (isi.iomap.length) { |
| 2161 | ret = iomap_swapfile_add_extent(&isi); |
| 2162 | if (ret) |
| 2163 | return ret; |
| 2164 | } |
| 2165 | |
| 2166 | *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; |
| 2167 | sis->max = isi.nr_pages; |
| 2168 | sis->pages = isi.nr_pages - 1; |
| 2169 | sis->highest_bit = isi.nr_pages - 1; |
| 2170 | return isi.nr_extents; |
| 2171 | } |
| 2172 | EXPORT_SYMBOL_GPL(iomap_swapfile_activate); |
| 2173 | #endif /* CONFIG_SWAP */ |
Christoph Hellwig | 89eb190 | 2018-06-01 09:03:08 -0700 | [diff] [blame] | 2174 | |
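|  | /* |
|  | * Translate a single mapped extent into a legacy block number: convert the |
|  | * byte address back into units of the inode's block size and refuse results |
|  | * that would not fit the int-sized FIBMAP interface. |
|  | */ |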
| 2175 | static loff_t |
| 2176 | iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length, |
| 2177 | void *data, struct iomap *iomap) |
| 2178 | { |
| 2179 | sector_t *bno = data, addr; |
| 2180 | |
| 2181 | if (iomap->type == IOMAP_MAPPED) { |
| 2182 | addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits; |
| 2183 | if (addr > INT_MAX) |
| 2184 | WARN(1, "would truncate bmap result\n"); |
| 2185 | else |
| 2186 | *bno = addr; |
| 2187 | } |
| 2188 | return 0; |
| 2189 | } |
| 2190 | |
| 2191 | /* legacy ->bmap interface. 0 is the error return (!) */ |
| 2192 | sector_t |
| 2193 | iomap_bmap(struct address_space *mapping, sector_t bno, |
| 2194 | const struct iomap_ops *ops) |
| 2195 | { |
| 2196 | struct inode *inode = mapping->host; |
Eric Sandeen | 79b3dbe | 2018-08-02 13:09:27 -0700 | [diff] [blame] | 2197 | loff_t pos = bno << inode->i_blkbits; |
Christoph Hellwig | 89eb190 | 2018-06-01 09:03:08 -0700 | [diff] [blame] | 2198 | unsigned blocksize = i_blocksize(inode); |
| 2199 | |
| 2200 | if (filemap_write_and_wait(mapping)) |
| 2201 | return 0; |
| 2202 | |
| 2203 | bno = 0; |
| 2204 | iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor); |
| 2205 | return bno; |
| 2206 | } |
| 2207 | EXPORT_SYMBOL_GPL(iomap_bmap); |