// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
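
/*
 * Example (illustrative only, not taken from this file): a filesystem that
 * uses iomap for direct I/O would typically wire the helper above into its
 * file_operations so that polled I/O (IOCB_HIPRI) can find the right queue:
 *
 *	static const struct file_operations example_file_operations = {
 *		...
 *		.read_iter	= example_file_read_iter,
 *		.write_iter	= example_file_write_iter,
 *		.iopoll		= iomap_dio_iopoll,
 *	};
 */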

static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	dio->submit.cookie = submit_bio(bio);
}

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing. Either
	 * one is a pretty crazy thing to do, so we don't support it 100%. If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

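/*
 * Deferred completion for asynchronous writes: runs from the super_block's
 * s_dio_done_wq workqueue so that ->end_io(), page cache invalidation and
 * generic_write_sync() happen in process context rather than in the bio
 * end_io (interrupt) context.
 */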
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet. We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

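/*
 * Per-bio completion handler. The final reference drop decides how the dio
 * is completed: wake the submitter for synchronous I/O, defer to the
 * workqueue for asynchronous writes, or complete inline for asynchronous
 * reads.
 */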
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

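/*
 * Submit a write of zeroes from the shared zero page to cover the sub-block
 * head or tail of an I/O, so that newly allocated or unwritten blocks never
 * expose stale data around an unaligned direct write.
 */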
static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio);
}

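/*
 * Build and submit bios for the part of the request covered by a single
 * block-mapped extent: check alignment, decide whether sub-block zeroing and
 * FUA can be used, and advance the caller's iov_iter by however much was
 * submitted.
 */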
static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio);
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	if (copied)
		return copied;
	return ret;
}

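/*
 * Reads of holes and unwritten extents are served by zero-filling the user
 * buffer; no bio is submitted.
 */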
static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

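/*
 * Inline data lives inside the filesystem metadata (e.g. in the inode), so
 * "direct" I/O degenerates to a memory copy to or from iomap->inline_data,
 * with i_size updated for extending writes.
 */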
static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

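/*
 * Dispatch a single mapped extent to the appropriate actor above, based on
 * the extent type the filesystem returned from ->iomap_begin.
 */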
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not. This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		bool wait_for_completion)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
		return -EIO;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO. Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing. If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as splice code) rely on it.
			 */
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio. There are three
	 * different ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
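
/*
 * Example (illustrative only, names are hypothetical): a filesystem's
 * ->read_iter path would drive iomap_dio_rw() roughly like this, supplying
 * its own iomap_ops and holding i_rwsem as asserted by lockdep above:
 *
 *	static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL,
 *				is_sync_kiocb(iocb));
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * A write path would additionally pass an iomap_dio_ops with an ->end_io
 * method if it needs post-I/O work such as unwritten extent conversion.
 */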