// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb *iocb;
	const struct iomap_dio_ops *dops;
	loff_t i_size;
	loff_t size;
	atomic_t ref;
	unsigned flags;
	int error;
	bool wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter *iter;
			struct task_struct *waiter;
			struct request_queue *last_queue;
			blk_qc_t cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct work;
		} aio;
	};
};

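/*
 * Poll for completion of a polled (IOCB_HIPRI) direct I/O.  The submission
 * path caches the last request queue in iocb->private and the blk-mq cookie
 * in iocb->ki_cookie so that blk_poll() can be called on the right queue.
 */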
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);

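/*
 * Take an extra dio reference for this bio and submit it, either through the
 * filesystem's ->submit_io hook or directly via submit_bio().  The reference
 * is dropped in iomap_dio_bio_end_io() when the bio completes.
 */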
static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	if (dio->dops && dio->dops->submit_io)
		dio->submit.cookie = dio->dops->submit_io(
				file_inode(dio->iocb->ki_filp),
				iomap, bio, pos);
	else
		dio->submit.cookie = submit_bio(bio);
}

ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error && dio->size &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	kfree(dio);

	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

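/*
 * Deferred completion work for asynchronous writes: call iomap_dio_complete()
 * and then the iocb's ->ki_complete() handler.  Queued on the superblock's
 * s_dio_done_wq from the bio end_io handler so completion runs in process
 * context.
 */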
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

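/*
 * Per-bio completion handler.  Record any error and, when the final dio
 * reference is dropped, either wake the synchronous waiter, punt write
 * completion to the completion workqueue, or complete reads inline.
 */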
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

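/*
 * Zero part of a block by writing from the shared ZERO_PAGE.  Used by
 * iomap_dio_bio_actor() to zero the head and tail of sub-block writes.
 */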
static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline unsigned int
iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
{
	unsigned int opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
		return REQ_OP_READ;
	}

	if (iomap->flags & IOMAP_F_ZONE_APPEND)
		opflags |= REQ_OP_ZONE_APPEND;
	else
		opflags |= REQ_OP_WRITE;

	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	unsigned int bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion), and
		 * the underlying device supports FUA.  This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;
		bio->bi_opf = bio_opf;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		iomap_dio_submit_bio(dio, iomap, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

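/*
 * Reads from holes (and from unwritten extents) never touch the block device:
 * just fill the user buffer with zeroes.
 */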
static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

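/*
 * For inline extents the data lives in iomap->inline_data, so copy directly
 * between it and the user iter; writes also update i_size and mark the inode
 * dirty.  No bios are issued.
 */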
static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

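/*
 * Dispatch one extent returned by iomap_apply() to the actor that matches its
 * mapping type.
 */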
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_apply() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write.  This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes.  In that case, we still need to do a full data sync
 * completion.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	unsigned int iomap_flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!count)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomap_flags |= IOMAP_NOWAIT;
		}

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		iomap_flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomap_flags |= IOMAP_NOWAIT;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
		ret = -EAGAIN;
		if (pos >= dio->i_size || pos + count > dio->i_size)
			goto out_free_dio;
		iomap_flags |= IOMAP_OVERWRITE_ONLY;
	}

	ret = filemap_write_and_wait_range(mapping, pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
				end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, pos, count);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, iomap_flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as splice code) rely on it.
			 */
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return ERR_PTR(-EIOCBQUEUED);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie, true))
				blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

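/*
 * Wrapper around __iomap_dio_rw() that also runs iomap_dio_complete(), for
 * callers that do not need to separate submission from completion.
 */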
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);