/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 *		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache.
 */
#define DIO_PAGES	64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
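
/*
 * Example (illustrative): with 512-byte dio_blocks inside a 4096-byte
 * filesystem block, blkfactor is 3, so fs_block = dio_block >> 3 and,
 * going the other way, dio_block = fs_block << 3.
 */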

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;
	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int op;
	int op_flags;
	blk_qc_t bio_cookie;
	struct gendisk *bio_disk;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;			/* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;
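
/*
 * Note: the union above relies on pages[] being used only while the
 * request is assembled in the submission path, while complete_work is
 * only used after all BIOs have completed and the dio has been handed
 * to the completion workqueue, so the two never overlap in time.
 */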

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	ssize_t ret;

	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				&sdio->from);

	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		get_page(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		iov_iter_advance(sdio->iter, ret);
		ret += sdio->from;
		sdio->head = 0;
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;
}
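
/*
 * Worked example (illustrative): if iov_iter_get_pages() pinned 10000
 * bytes starting 512 bytes into the first page (sdio->from == 512), then
 * ret becomes 10512 above, giving sdio->tail == 3 and sdio->to == 2320:
 * 3584 + 4096 + 2320 == 10000 bytes spread over three pages.
 */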

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently, which makes nicer use of the
 * L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}
/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @dio: the dio structure being completed
 * @ret: the result so far (or -EIOCBQUEUED from AIO submission)
 * @is_async: true if this is an asynchronous (AIO) completion
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
{
	loff_t offset = dio->iocb->ki_pos;
	ssize_t transferred = 0;
	int err;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->op == REQ_OP_READ) &&
		    ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
		/* ignore EFAULT if some IO has been done */
		if (unlikely(ret == -EFAULT) && transferred)
			ret = 0;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io) {
		// XXX: ki_pos??
		err = dio->end_io(dio->iocb, offset, ret, dio->private);
		if (err)
			ret = err;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (ret > 0 && dio->op == REQ_OP_WRITE &&
	    dio->inode->i_mapping->nrpages) {
		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
					offset >> PAGE_SHIFT,
					(offset + ret - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(err);
	}

	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(dio->inode);

	if (is_async) {
		/*
		 * generic_write_sync expects ki_pos to have been updated
		 * already, but the submission path only does this for
		 * synchronous I/O.
		 */
		dio->iocb->ki_pos += transferred;

		if (dio->op == REQ_OP_WRITE)
			ret = generic_write_sync(dio->iocb, transferred);
		dio->iocb->ki_complete(dio->iocb, ret, 0);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}
static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, 0, true);
}

static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;
	bool defer_completion = false;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		/*
		 * Defer completion when defer_completion is set or
		 * when the inode has pages mapped and this is an AIO write.
		 * We need to invalidate those pages because there is a
		 * chance they contain stale data in the case buffered IO
		 * went in between AIO submission and completion into the
		 * same region.
		 */
		if (dio->result)
			defer_completion = dio->defer_completion ||
					   (dio->op == REQ_OP_WRITE &&
					    dio->inode->i_mapping->nrpages);
		if (defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, 0, true);
		}
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio);
	else
		dio_bio_end_io(bio);
}
EXPORT_SYMBOL_GPL(dio_end_io);
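
/*
 * Illustrative sketch (hypothetical filesystem, not from this file): a
 * bi_end_io installed by a custom dio_submit_t would do its own work and
 * then hand the bio back to the direct-io core:
 *
 *	static void myfs_dio_bi_end_io(struct bio *bio)
 *	{
 *		myfs_finish_io(bio);	// fs-specific completion (hypothetical)
 *		dio_end_io(bio);
 *	}
 */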

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when called with
	 * __GFP_RECLAIM and we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = first_sector;
	bio_set_op_attrs(bio, dio->op, dio->op_flags);
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	bio->bi_write_hint = dio->iocb->ki_hint;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	dio->bio_disk = bio->bi_disk;

	if (sdio->submit_io) {
		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
		dio->bio_cookie = BLK_QC_T_NONE;
	} else
		dio->bio_cookie = submit_bio(bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		put_page(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie))
			io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned i;
	blk_status_t err = bio->bi_status;

	if (err) {
		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
			dio->io_error = -EAGAIN;
		else
			dio->io_error = -EIO;
	}

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		bio_for_each_segment_all(bvec, bio, i) {
			struct page *page = bvec->bv_page;

			if (dio->op == REQ_OP_READ && !PageCompound(page) &&
					dio->should_dirty)
				set_page_dirty_lock(page);
			put_page(page);
		}
		bio_put(bio);
	}
	return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are recorded in dio->io_error and are returned to the caller
 * via dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Create a workqueue for deferred direct IO completions.  We allocate the
 * workqueue when it's first needed.  This avoids creating the workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us?  Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
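
/*
 * Note: the cmpxchg() above is the usual publish-once idiom: it atomically
 * stores wq only if s_dio_done_wq was still NULL and returns the old value,
 * so exactly one of several racing creators wins and the losers free their
 * workqueue.
 */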

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, i_blocksize(inode).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
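 *
 * Illustrative sketch (hypothetical filesystem, not from this file): a
 * get_block() that maps an 8-block contiguous extent starting at disk
 * block "phys" would fill in:
 *
 *	bh->b_blocknr = phys;
 *	bh->b_size = 8 << inode->i_blkbits;
 *	set_buffer_mapped(bh);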
 */
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes that could fill holes inside i_size on a
		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
		 * overwrites are permitted.  We will return early to the caller
		 * once we see an unmapped buffer head returned, and the caller
		 * will fall back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->op == REQ_OP_WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
							i_blkbits))
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		get_page(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->op == REQ_OP_WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	get_page(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If sdio->boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (sdio->boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		if (sdio->bio)
			dio_bio_submit(dio, sdio);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill in the unused portion of the
 * block with zeros.  This happens only if the user buffer, file offset or
 * IO length is not a filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
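
/*
 * Worked example (illustrative): with blkfactor == 3 (512-byte dio blocks
 * in a 4096-byte fs block), a write into a newly allocated fs block that
 * starts at dio block 5 zeroes dio blocks 0-4 (end == 0), while one that
 * ends at dio block 5 zeroes dio blocks 5-7 (end == 1).
 */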
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from a file, because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	const unsigned i_blkbits = blkbits + sdio->blkfactor;
	int ret = 0;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		struct page *page;
		size_t from, to;

		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		from = sdio->head ? 0 : sdio->from;
		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
		sdio->head++;

		while (from < to) {
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					put_page(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh)) {
					clean_bdev_aliases(
						map_bh->b_bdev,
						map_bh->b_blocknr,
						map_bh->b_size >> i_blkbits);
				}

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->op == REQ_OP_WRITE) {
					put_page(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					put_page(page);
					goto out;
				}
				zero_user(page, from, 1 << blkbits);
				sdio->block_in_file++;
				from += 1 << blkbits;
				dio->result += 1 << blkbits;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (to - from) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  from,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				put_page(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			from += this_chunk_bytes;
			dio->result += this_chunk_bytes;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		put_page(page);
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to ->complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held; for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we incremented the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that filesystems provide exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * is always inlined.  Otherwise gcc is unable to split the structure into
 * individual fields and will generate much worse code.  This is important
 * for the whole file.
Christoph Hellwigeafdc7d2010-06-04 11:29:53 +02001141 */
static inline ssize_t
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
		      struct block_device *bdev, struct iov_iter *iter,
		      get_block_t get_block, dio_iodone_t end_io,
		      dio_submit_t submit_io, int flags)
{
	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	loff_t end = offset + count;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	struct buffer_head map_bh = { 0, };
	struct blk_plug plug;
	unsigned long align = offset | iov_iter_alignment(iter);

	/*
	 * Avoid references to bdev if not absolutely needed to give
	 * the early prefetch in the caller enough time.
	 */

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			goto out;
	}
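
	/*
	 * Worked example (illustrative numbers): with a 4096-byte
	 * filesystem blocksize (i_blkbits = 12) over a disk with
	 * 512-byte logical sectors, a request aligned only to 512
	 * bytes passes the check above once blkbits is relaxed to 9.
	 * Later, sdio.blkfactor becomes 12 - 9 = 3, i.e. the dio runs
	 * at 1/8-fs-block granularity.  A request is only rejected
	 * with -EINVAL if it fails even the 512-byte alignment check.
	 */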

	/* watch out for a 0 len io from a tricksy fs */
	if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a 0.5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		if (iov_iter_rw(iter) == READ) {
			struct address_space *mapping =
					iocb->ki_filp->f_mapping;

			/* will be released by direct_io_worker */
			inode_lock(inode);

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {
				inode_unlock(inode);
				kmem_cache_free(dio_cache, dio);
				goto out;
			}
		}
	}

	/* Once we've sampled i_size, check for reads beyond EOF */
	dio->i_size = i_size_read(inode);
	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
		if (dio->flags & DIO_LOCKING)
			inode_unlock(inode);
		kmem_cache_free(dio_cache, dio);
		retval = 0;
		goto out;
	}

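	/*
	 * Example: an O_DIRECT pread() at or past EOF takes the branch
	 * above and simply returns 0 (end of file) to the caller,
	 * without ever building a bio.
	 */
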
	/*
	 * For file-extending writes, updating i_size before data writeouts
	 * complete can expose uninitialized blocks in dumb filesystems.
	 * In that case we need to wait for I/O completion even if asked
	 * for an asynchronous write.
	 */
	if (is_sync_kiocb(iocb))
		dio->is_async = false;
	else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
		 iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
		dio->is_async = false;
	else
		dio->is_async = true;

	dio->inode = inode;
	if (iov_iter_rw(iter) == WRITE) {
		dio->op = REQ_OP_WRITE;
		dio->op_flags = REQ_SYNC | REQ_IDLE;
		if (iocb->ki_flags & IOCB_NOWAIT)
			dio->op_flags |= REQ_NOWAIT;
	} else {
		dio->op = REQ_OP_READ;
	}

	/*
	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
	 * so that we can call ->fsync.
	 */
	if (dio->is_async && iov_iter_rw(iter) == WRITE) {
		retval = 0;
		if ((iocb->ki_filp->f_flags & O_DSYNC) ||
		    IS_SYNC(iocb->ki_filp->f_mapping->host))
			retval = dio_set_defer_completion(dio);
		else if (!dio->inode->i_sb->s_dio_done_wq) {
			/*
			 * In case of an AIO write racing with a buffered read
			 * we need to defer completion.  We can't decide this
			 * now, but the workqueue needs to be initialized here.
			 */
			retval = sb_init_dio_done_wq(dio->inode->i_sb);
		}
		if (retval) {
			/*
			 * We grab i_mutex only for reads, so we don't have
			 * to release it here.
			 */
			kmem_cache_free(dio_cache, dio);
			goto out;
		}
	}

	/*
	 * Will be decremented at I/O completion time.
	 */
	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = 0;
	sdio.blkbits = blkbits;
	sdio.blkfactor = i_blkbits - blkbits;
	sdio.block_in_file = offset >> blkbits;

	sdio.get_block = get_block;
	dio->end_io = end_io;
	sdio.submit_io = submit_io;
	sdio.final_block_in_bio = -1;
	sdio.next_block_for_io = -1;

	dio->iocb = iocb;

	spin_lock_init(&dio->bio_lock);
	dio->refcount = 1;

	dio->should_dirty = (iter->type == ITER_IOVEC);
	sdio.iter = iter;
	sdio.final_block_in_request =
		(offset + iov_iter_count(iter)) >> blkbits;

	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out the first and last block.
	 */
	if (unlikely(sdio.blkfactor))
		sdio.pages_in_io = 2;

	sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);

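	/*
	 * Example with illustrative numbers: a single 1 MiB iovec of
	 * page-aligned user memory contributes 256 pages here; if the
	 * request is sub-block aligned (blkfactor != 0), the two extra
	 * pages reserved above for edge zeroing bring the estimate to
	 * 258.  pages_in_io is only an approximate count used when
	 * sizing bios, so a slight overcount is harmless.
	 */
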
	blk_start_plug(&plug);

	retval = do_direct_IO(dio, &sdio, &map_bh);
	if (retval)
		dio_cleanup(dio, &sdio);

	if (retval == -ENOTBLK) {
		/*
		 * The remaining part of the request will be
		 * handled by buffered I/O when we return.
		 */
		retval = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, &sdio, 1, &map_bh);

	if (sdio.cur_page) {
		ssize_t ret2;

		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
		if (retval == 0)
			retval = ret2;
		put_page(sdio.cur_page);
		sdio.cur_page = NULL;
	}
	if (sdio.bio)
		dio_bio_submit(dio, &sdio);

	blk_finish_plug(&plug);

	/*
	 * It is possible that we return short IO due to end of file.
	 * In that case, we need to release all the pages we got hold of.
	 */
	dio_cleanup(dio, &sdio);

	/*
	 * All block lookups have been performed.  For READ requests
	 * we can let i_mutex go now that it has achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
		inode_unlock(dio->inode);

	/*
	 * The only time we want to leave bios in flight is when a successful
	 * partial aio read or a full aio write has been set up.  In that case
	 * bio completion will call aio_complete.  The only time it's safe to
	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
	 * This had *better* be the only place that raises -EIOCBQUEUED.
	 */
	BUG_ON(retval == -EIOCBQUEUED);
	if (dio->is_async && retval == 0 && dio->result &&
	    (iov_iter_rw(iter) == READ || dio->result == count))
		retval = -EIOCBQUEUED;
	else
		dio_await_completion(dio);

	if (drop_refcount(dio) == 0)
		retval = dio_complete(dio, retval, false);
	else
		BUG_ON(retval != -EIOCBQUEUED);

out:
	return retval;
}

ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
			     struct block_device *bdev, struct iov_iter *iter,
			     get_block_t get_block,
			     dio_iodone_t end_io, dio_submit_t submit_io,
			     int flags)
{
	/*
	 * The block device state is needed at the end to finally submit
	 * everything.  Since it's likely to be cache cold, prefetch the
	 * pieces we will need as the first thing here to hide some of
	 * the latency.
	 */
	prefetch(&bdev->bd_disk->part_tbl);
	prefetch(bdev->bd_queue);
	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);

	return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
				     end_io, submit_io, flags);
}

EXPORT_SYMBOL(__blockdev_direct_IO);
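
/*
 * Typical usage (illustrative sketch, not part of this file): a
 * filesystem's ->direct_IO method hands its get_block callback to the
 * blockdev_direct_IO() wrapper, which in turn calls
 * __blockdev_direct_IO() with DIO_LOCKING semantics.  Loosely modeled
 * on a simple block-based filesystem; "myfs_get_block" is a made-up
 * name standing in for the filesystem's real get_block_t:
 *
 *	static ssize_t myfs_direct_IO(struct kiocb *iocb,
 *				      struct iov_iter *iter)
 *	{
 *		struct inode *inode = iocb->ki_filp->f_mapping->host;
 *
 *		return blockdev_direct_IO(iocb, inode, iter,
 *					  myfs_get_block);
 *	}
 */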

static __init int dio_init(void)
{
	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
	return 0;
}
module_init(dio_init)
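
/*
 * For context, the alignment rules enforced at the top of
 * do_blockdev_direct_IO() are what an O_DIRECT user sees from
 * userspace.  A minimal sketch (plain userspace C; headers and error
 * handling omitted, and 512-byte alignment assumed to match the
 * device's logical block size):
 *
 *	int fd = open("/path/to/file", O_RDONLY | O_DIRECT);
 *	void *buf;
 *
 *	posix_memalign(&buf, 512, 4096);	// aligned buffer
 *	pread(fd, buf, 4096, 0);		// aligned length and offset
 *
 * Misalign the buffer, the length, or the file offset and the request
 * is rejected with -EINVAL by the check near the top of
 * do_blockdev_direct_IO().
 */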