// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

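/*
 * Poll for completion of outstanding polled (IOCB_HIPRI) direct I/O.
 * Filesystems built on iomap direct I/O typically wire this straight
 * into their file_operations; a minimal sketch (foo_* is a placeholder
 * name, not a real filesystem):
 *
 *	const struct file_operations foo_file_operations = {
 *		...
 *		.iopoll		= iomap_dio_iopoll,
 *	};
 *
 * kiocb->private and kiocb->ki_cookie are filled in by __iomap_dio_rw()
 * after the last bio has been submitted.
 */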
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);

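/*
 * Submit one bio on behalf of the dio.  Every bio in flight holds a
 * reference on the iomap_dio; the matching drop happens in
 * iomap_dio_bio_end_io().  Filesystems that need to intercept bio
 * submission can do so through the optional ->submit_io hook, which
 * then supplies the polling cookie.
 */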
static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	if (dio->dops && dio->dops->submit_io)
		dio->submit.cookie = dio->dops->submit_io(
				file_inode(dio->iocb->ki_filp),
				iomap, bio, pos);
	else
		dio->submit.cookie = submit_bio(bio);
}

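/*
 * Finish a direct I/O: run the filesystem's ->end_io hook, trim short
 * reads at i_size, re-invalidate the page cache after writes, and issue
 * any deferred O_(D)SYNC.  Frees the dio; callers must not touch it
 * afterwards.
 */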
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would
	 * cache zeros from unwritten extents.
	 */
	if (!dio->error && dio->size &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	kfree(dio);

	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

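/*
 * Deferred completion, run from the per-superblock s_dio_done_wq
 * workqueue for asynchronous writes whose completion work cannot be
 * done from bio completion context.
 */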
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

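/*
 * Per-bio completion handler.  Records the first error seen, and on the
 * drop of the final reference either wakes the synchronous waiter,
 * punts write completion to the workqueue, or completes a read in
 * place.
 */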
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

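/*
 * Write zeroes to [pos, pos + len) using the shared zero page.  Used to
 * fill the uncovered head or tail of a filesystem block when a
 * sub-block direct write lands in a new or unwritten extent.
 */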
static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline unsigned int
iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
{
	unsigned int opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
		return REQ_OP_READ;
	}

	if (iomap->flags & IOMAP_F_ZONE_APPEND)
		opflags |= REQ_OP_ZONE_APPEND;
	else
		opflags |= REQ_OP_WRITE;

	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

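/*
 * Build and submit bios for a single mapped or unwritten extent.  This
 * is where sub-block zeroing is performed and where the optimistic FUA
 * optimisation is applied per-bio.
 */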
static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	unsigned int bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;
		bio->bi_opf = bio_opf;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		iomap_dio_submit_bio(dio, iomap, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

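/*
 * Direct reads from a hole require no device I/O: just zero-fill the
 * user buffer for the length of the hole.
 */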
static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

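/*
 * Copy directly to or from inline data stored in the inode; no bio is
 * built.  Writes zero any gap between the old size and the write
 * offset, then update i_size and dirty the inode.
 */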
static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

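/*
 * Dispatch one mapping returned by ->iomap_begin() to the actor
 * matching its extent type.  Called, possibly repeatedly, through
 * iomap_apply() from __iomap_dio_rw().
 */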
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_apply() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write.  This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes.  In that case, we still need to do a full
 * data sync completion.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	unsigned int iomap_flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!count)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomap_flags |= IOMAP_NOWAIT;
		}

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		iomap_flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomap_flags |= IOMAP_NOWAIT;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
		ret = -EAGAIN;
		if (pos >= dio->i_size || pos + count > dio->i_size)
			goto out_free_dio;
		iomap_flags |= IOMAP_OVERWRITE_ONLY;
	}

	ret = filemap_write_and_wait_range(mapping, pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
				end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, pos, count);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, iomap_flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as splice code) rely on it.
			 */
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return ERR_PTR(-EIOCBQUEUED);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie, true))
				blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

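/*
 * Convenience wrapper around __iomap_dio_rw() that also completes the
 * dio (asynchronous iocbs come back as -EIOCBQUEUED through the error
 * path).  A minimal sketch of a read-side caller, assuming placeholder
 * foo_* ops:
 *
 *	static ssize_t foo_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		return iomap_dio_rw(iocb, to, &foo_iomap_ops, NULL, 0);
 *	}
 */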
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);