// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 *		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

#include "internal.h"

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache.
 */
#define DIO_PAGES	64

/*
 * Flags for dio_complete()
 */
#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
#define DIO_COMPLETE_INVALIDATE	0x02	/* Can invalidate pages */

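/*
 * dio_aio_complete_work() passes both flags, while the interrupt-context
 * completion path in dio_bio_end_aio() passes only DIO_COMPLETE_ASYNC,
 * because page cache invalidation can sleep and so is deferred to the
 * workqueue when it is needed.
 */
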
/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */

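/*
 * For example (illustrative numbers): with 4096-byte fs blocks and
 * 512-byte-aligned user IO, blkbits is 9 and blkfactor is 3, so there
 * are 1 << 3 == 8 dio_blocks per fs block and a dio_block number is
 * shifted right by 3 to obtain the corresponding fs block number.
 */
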
/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;
	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int op;
	int op_flags;
	struct gendisk *bio_disk;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;			/* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
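	/*
	 * pages[] is used only on the submission side, while complete_work
	 * runs only after every bio has completed, so the two can safely
	 * share storage in the union below.
	 */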
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	ssize_t ret;

	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				&sdio->from);

	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		get_page(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		iov_iter_advance(sdio->iter, ret);
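		/*
		 * Convert the byte count into page-queue bounds.  After
		 * adding back the offset of the first page, "tail" is the
		 * number of pages covered and "to" is one past the last
		 * valid byte in the final page.  For example (illustrative
		 * numbers): from == 100 and ret == 6100 give ret == 6200,
		 * tail == 2 and to == 2104 with 4096-byte pages.
		 */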
		ret += sdio->from;
		sdio->head = 0;
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently, to provide nicer use of the
 * L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}

/*
 * dio_complete() - called when all DIO BIO I/O has been completed
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{
	loff_t offset = dio->iocb->ki_pos;
	ssize_t transferred = 0;
	int err;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->op == REQ_OP_READ) &&
		    ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
		/* ignore EFAULT if some IO has been done */
		if (unlikely(ret == -EFAULT) && transferred)
			ret = 0;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io) {
		// XXX: ki_pos??
		err = dio->end_io(dio->iocb, offset, ret, dio->private);
		if (err)
			ret = err;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (flags & DIO_COMPLETE_INVALIDATE &&
	    ret > 0 && dio->op == REQ_OP_WRITE &&
	    dio->inode->i_mapping->nrpages) {
		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
					offset >> PAGE_SHIFT,
					(offset + ret - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(dio->iocb->ki_filp);
	}

	inode_dio_end(dio->inode);

	if (flags & DIO_COMPLETE_ASYNC) {
		/*
		 * generic_write_sync expects ki_pos to have been updated
		 * already, but the submission path only does this for
		 * synchronous I/O.
		 */
		dio->iocb->ki_pos += transferred;

		if (ret > 0 && dio->op == REQ_OP_WRITE)
			ret = generic_write_sync(dio->iocb, ret);
		dio->iocb->ki_complete(dio->iocb, ret);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
}

static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;
	bool defer_completion = false;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
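	/*
	 * The submission path holds a dio reference of its own, so a waiter
	 * sleeping in dio_await_one() is woken as soon as this bio is the
	 * only one still in flight (remaining == 1).
	 */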
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		/*
		 * Defer completion when defer_completion is set or
		 * when the inode has pages mapped and this is AIO write.
		 * We need to invalidate those pages because there is a
		 * chance they contain stale data in the case buffered IO
		 * went in between AIO submission and completion into the
		 * same region.
		 */
		if (dio->result)
			defer_completion = dio->defer_completion ||
					   (dio->op == REQ_OP_WRITE &&
					    dio->inode->i_mapping->nrpages);
		if (defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
		}
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
	 * we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = first_sector;
	bio_set_op_attrs(bio, dio->op, dio->op_flags);
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	bio->bi_write_hint = dio->iocb->ki_hint;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;
	/* don't account direct I/O as memory stall */
	bio_clear_flag(bio, BIO_WORKINGSET);

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	dio->bio_disk = bio->bi_bdev->bd_disk;

	if (sdio->submit_io)
		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
	else
		submit_bio(bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		put_page(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		blk_io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{
	blk_status_t err = bio->bi_status;
	bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty;

	if (err) {
		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
			dio->io_error = -EAGAIN;
		else
			dio->io_error = -EIO;
	}

	if (dio->is_async && should_dirty) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		bio_release_pages(bio, should_dirty);
		bio_put(bio);
	}
	return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Create workqueue for deferred direct IO completions.  We allocate the
 * workqueue when it's first needed.  This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us?  Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, i_blocksize(inode).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
	loff_t i_size;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes that could fill holes inside i_size on a
		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
		 * overwrites are permitted.  We will return early to the caller
		 * once we see an unmapped buffer head returned, and the caller
		 * will fall back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->op == REQ_OP_WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			i_size = i_size_read(dio->inode);
			if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = bio_max_segs(sdio->pages_in_io);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		get_page(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
				    struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;
	int boundary = sdio->boundary;	/* dio_send_cur_page may clear it */

	if (dio->op == REQ_OP_WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	get_page(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		if (sdio->bio)
			dio_bio_submit(dio, sdio);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}
| 862 | |
| 863 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 864 | * If we are not writing the entire block and get_block() allocated |
| 865 | * the block for us, we need to fill in the unused portion of the
| 866 | * block with zeros. This happens only if the user buffer, file offset
| 867 | * or IO length is not a multiple of the filesystem block size.
| 868 | *
| 869 | * `end' is zero if we're zeroing the start of the IO, 1 if we're
| 870 | * zeroing the end.
| 871 | */ |
Andi Kleen | ba253fb | 2011-08-01 21:38:08 -0700 | [diff] [blame] | 872 | static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio, |
| 873 | int end, struct buffer_head *map_bh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | { |
| 875 | unsigned dio_blocks_per_fs_block; |
| 876 | unsigned this_chunk_blocks; /* In dio_blocks */ |
| 877 | unsigned this_chunk_bytes; |
| 878 | struct page *page; |
| 879 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 880 | sdio->start_zero_done = 1; |
Andi Kleen | 1877264 | 2011-08-01 21:38:07 -0700 | [diff] [blame] | 881 | if (!sdio->blkfactor || !buffer_new(map_bh)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 882 | return; |
| 883 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 884 | dio_blocks_per_fs_block = 1 << sdio->blkfactor; |
| 885 | this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 886 | |
| 887 | if (!this_chunk_blocks) |
| 888 | return; |
| 889 | |
| 890 | /* |
| 891 | * We need to zero out part of an fs block. It is either at the |
| 892 | * beginning or the end of the fs block. |
| 893 | */ |
| 894 | if (end) |
| 895 | this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks; |
| 896 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 897 | this_chunk_bytes = this_chunk_blocks << sdio->blkbits; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 898 | |
Nick Piggin | 557ed1f | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 899 | page = ZERO_PAGE(0); |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 900 | if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes, |
Andi Kleen | 1877264 | 2011-08-01 21:38:07 -0700 | [diff] [blame] | 901 | sdio->next_block_for_io, map_bh)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 902 | return; |
| 903 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 904 | sdio->next_block_for_io += this_chunk_blocks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 905 | } |
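| |
| | /*
| |  * A worked sketch of the arithmetic above, assuming 512-byte dio blocks
| |  * inside 4096-byte fs blocks (blkfactor == 3, dio_blocks_per_fs_block == 8):
| |  * an IO boundary that falls 1024 bytes into a newly allocated fs block has
| |  * (block_in_file & 7) == 2.  At the front (end == 0) that zeroes the two
| |  * dio blocks preceding the IO; at the tail (end == 1) the count flips to
| |  * 8 - 2 == 6 blocks, padding out to the fs block boundary.
| |  */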
| 906 | |
| 907 | /* |
| 908 | * Walk the user pages, and the file, mapping blocks to disk and generating |
| 909 | * a sequence of (page,offset,len,block) mappings. These mappings are injected |
| 910 | * into submit_page_section(), which takes care of the next stage of submission.
| 911 | *
| 912 | * Direct IO against a blockdev differs from direct IO against a file:
| 913 | * we can happily perform page-sized but 512-byte aligned IOs. It is
| 914 | * important that blockdev IO be able to have fine alignment and large sizes.
| 915 | * |
Badari Pulavarty | 1d8fa7a | 2006-03-26 01:38:02 -0800 | [diff] [blame] | 916 | * So what we do is to permit the ->get_block function to populate bh.b_size |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 917 | * with the size of IO which is permitted at this offset and this i_blkbits. |
| 918 | * |
| 919 | * For best results, the blockdev should be set up with 512-byte i_blkbits and |
Badari Pulavarty | 1d8fa7a | 2006-03-26 01:38:02 -0800 | [diff] [blame] | 920 | * it should set b_size to PAGE_SIZE or more inside get_block(). This gives |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 | * fine alignment but still allows this function to work in PAGE_SIZE units. |
| 922 | */ |
Andi Kleen | 1877264 | 2011-08-01 21:38:07 -0700 | [diff] [blame] | 923 | static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, |
| 924 | struct buffer_head *map_bh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 925 | { |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 926 | const unsigned blkbits = sdio->blkbits; |
Chandan Rajendra | dd545b5 | 2017-01-10 13:29:54 -0700 | [diff] [blame] | 927 | const unsigned i_blkbits = blkbits + sdio->blkfactor; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 928 | int ret = 0; |
| 929 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 930 | while (sdio->block_in_file < sdio->final_block_in_request) { |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 931 | struct page *page; |
| 932 | size_t from, to; |
Boaz Harrosh | 6fcc542 | 2014-07-20 12:09:04 +0300 | [diff] [blame] | 933 | |
| 934 | page = dio_get_page(dio, sdio); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | if (IS_ERR(page)) { |
| 936 | ret = PTR_ERR(page); |
| 937 | goto out; |
| 938 | } |
Boaz Harrosh | 6fcc542 | 2014-07-20 12:09:04 +0300 | [diff] [blame] | 939 | from = sdio->head ? 0 : sdio->from; |
| 940 | to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE; |
| 941 | sdio->head++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 942 | |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 943 | while (from < to) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | unsigned this_chunk_bytes; /* # of bytes mapped */ |
| 945 | unsigned this_chunk_blocks; /* # of blocks */ |
| 946 | unsigned u; |
| 947 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 948 | if (sdio->blocks_available == 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | /* |
| 950 | * Need to go and map some more disk |
| 951 | */ |
| 952 | unsigned long blkmask; |
| 953 | unsigned long dio_remainder; |
| 954 | |
Andi Kleen | 1877264 | 2011-08-01 21:38:07 -0700 | [diff] [blame] | 955 | ret = get_more_blocks(dio, sdio, map_bh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | if (ret) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 957 | put_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 958 | goto out; |
| 959 | } |
| 960 | if (!buffer_mapped(map_bh)) |
| 961 | goto do_holes; |
| 962 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 963 | sdio->blocks_available = |
Jan Kara | f734c89 | 2016-11-04 18:08:12 +0100 | [diff] [blame] | 964 | map_bh->b_size >> blkbits; |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 965 | sdio->next_block_for_io = |
| 966 | map_bh->b_blocknr << sdio->blkfactor; |
Jan Kara | f734c89 | 2016-11-04 18:08:12 +0100 | [diff] [blame] | 967 | if (buffer_new(map_bh)) { |
| 968 | clean_bdev_aliases( |
| 969 | map_bh->b_bdev, |
| 970 | map_bh->b_blocknr, |
Chandan Rajendra | dd545b5 | 2017-01-10 13:29:54 -0700 | [diff] [blame] | 971 | map_bh->b_size >> i_blkbits); |
Jan Kara | f734c89 | 2016-11-04 18:08:12 +0100 | [diff] [blame] | 972 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 974 | if (!sdio->blkfactor) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 | goto do_holes; |
| 976 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 977 | blkmask = (1 << sdio->blkfactor) - 1; |
| 978 | dio_remainder = (sdio->block_in_file & blkmask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 979 | |
| 980 | /* |
| 981 | * If we are at the start of IO and that IO |
| 982 | * starts partway into a fs-block, |
| 983 | * dio_remainder will be non-zero. If the IO |
| 984 | * is a read then we can simply advance the IO |
| 985 | * cursor to the first block which is to be |
| 986 | * read. But if the IO is a write and the |
| 987 | * block was newly allocated we cannot do that; |
| 988 | * the start of the fs block must be zeroed out |
| 989 | * on-disk.
| 990 | */ |
| 991 | if (!buffer_new(map_bh)) |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 992 | sdio->next_block_for_io += dio_remainder; |
| 993 | sdio->blocks_available -= dio_remainder; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 994 | } |
| 995 | do_holes: |
| 996 | /* Handle holes */ |
| 997 | if (!buffer_mapped(map_bh)) { |
Jeff Moyer | 35dc816 | 2006-02-03 03:04:27 -0800 | [diff] [blame] | 998 | loff_t i_size_aligned; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | |
| 1000 | /* AKPM: eargh, -ENOTBLK is a hack */ |
Mike Christie | 8a4c1e4 | 2016-06-05 14:31:50 -0500 | [diff] [blame] | 1001 | if (dio->op == REQ_OP_WRITE) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1002 | put_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 | return -ENOTBLK; |
| 1004 | } |
| 1005 | |
Jeff Moyer | 35dc816 | 2006-02-03 03:04:27 -0800 | [diff] [blame] | 1006 | /* |
| 1007 | * Be sure to account for a partial block as the |
| 1008 | * last block in the file |
| 1009 | */ |
| 1010 | i_size_aligned = ALIGN(i_size_read(dio->inode), |
| 1011 | 1 << blkbits); |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1012 | if (sdio->block_in_file >= |
Jeff Moyer | 35dc816 | 2006-02-03 03:04:27 -0800 | [diff] [blame] | 1013 | i_size_aligned >> blkbits) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | /* We hit eof */ |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1015 | put_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1016 | goto out; |
| 1017 | } |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1018 | zero_user(page, from, 1 << blkbits); |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1019 | sdio->block_in_file++; |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1020 | from += 1 << blkbits; |
Al Viro | 3320c60 | 2014-03-10 02:30:55 -0400 | [diff] [blame] | 1021 | dio->result += 1 << blkbits; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1022 | goto next_block; |
| 1023 | } |
| 1024 | |
| 1025 | /* |
| 1026 | * If we're performing IO which has an alignment which |
| 1027 | * is finer than the underlying fs, go check to see if |
| 1028 | * we must zero out the start of this block. |
| 1029 | */ |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1030 | if (unlikely(sdio->blkfactor && !sdio->start_zero_done)) |
Andi Kleen | 1877264 | 2011-08-01 21:38:07 -0700 | [diff] [blame] | 1031 | dio_zero_block(dio, sdio, 0, map_bh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | |
| 1033 | /* |
| 1034 | * Work out, in this_chunk_blocks, how much disk we |
| 1035 | * can add to this page |
| 1036 | */ |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1037 | this_chunk_blocks = sdio->blocks_available; |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1038 | u = (to - from) >> blkbits; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 | if (this_chunk_blocks > u) |
| 1040 | this_chunk_blocks = u; |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1041 | u = sdio->final_block_in_request - sdio->block_in_file; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1042 | if (this_chunk_blocks > u) |
| 1043 | this_chunk_blocks = u; |
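| | /* i.e. min3(blocks mapped, blocks left in this page, blocks left in this request) */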
| 1044 | this_chunk_bytes = this_chunk_blocks << blkbits; |
| 1045 | BUG_ON(this_chunk_bytes == 0); |
| 1046 | |
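| | /* Only propagate the boundary hint once this mapping is fully consumed */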
Jan Kara | 092c8d4 | 2013-04-29 15:06:17 -0700 | [diff] [blame] | 1047 | if (this_chunk_blocks == sdio->blocks_available) |
| 1048 | sdio->boundary = buffer_boundary(map_bh); |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1049 | ret = submit_page_section(dio, sdio, page, |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1050 | from, |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1051 | this_chunk_bytes, |
Andi Kleen | 1877264 | 2011-08-01 21:38:07 -0700 | [diff] [blame] | 1052 | sdio->next_block_for_io, |
| 1053 | map_bh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | if (ret) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1055 | put_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1056 | goto out; |
| 1057 | } |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1058 | sdio->next_block_for_io += this_chunk_blocks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1060 | sdio->block_in_file += this_chunk_blocks; |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1061 | from += this_chunk_bytes; |
| 1062 | dio->result += this_chunk_bytes; |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1063 | sdio->blocks_available -= this_chunk_blocks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | next_block: |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1065 | BUG_ON(sdio->block_in_file > sdio->final_block_in_request); |
| 1066 | if (sdio->block_in_file == sdio->final_block_in_request) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | break; |
| 1068 | } |
| 1069 | |
| 1070 | /* Drop the ref which was taken in get_user_pages() */ |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1071 | put_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | } |
| 1073 | out: |
| 1074 | return ret; |
| 1075 | } |
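| |
| | /*
| |  * To make the ->get_block contract described above do_direct_IO() concrete,
| |  * here is a minimal sketch in the style of a blockdev get_block.  The name
| |  * example_get_block() and its 1:1 block mapping are hypothetical, not part
| |  * of this file; the point is the b_size trick: report as much contiguous
| |  * disk as is permitted at this offset, so the loop above can work in
| |  * PAGE_SIZE units.
| |  */
| | static int __maybe_unused example_get_block(struct inode *inode,
| | sector_t iblock, struct buffer_head *bh, int create)
| | {
| | bh->b_bdev = I_BDEV(inode); /* the backing block device */
| | bh->b_blocknr = iblock; /* hypothetical 1:1 block mapping */
| | bh->b_size = PAGE_SIZE; /* permit page-sized IO at this offset */
| | set_buffer_mapped(bh);
| | return 0;
| | }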
| 1076 | |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1077 | static inline int drop_refcount(struct dio *dio) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | { |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1079 | int ret2; |
Zach Brown | 5eb6c7a | 2006-12-10 02:21:07 -0800 | [diff] [blame] | 1080 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | |
Zach Brown | 8459d86 | 2006-12-10 02:21:05 -0800 | [diff] [blame] | 1082 | /* |
| 1083 | * A sync caller will always drop the final ref and complete the
Zach Brown | 5eb6c7a | 2006-12-10 02:21:07 -0800 | [diff] [blame] | 1084 | * operation. AIO can too, if the operation was broken as described above,
| 1085 | * or if all the bios race to complete before we get here. In
| 1086 | * that case dio_complete() translates the EIOCBQUEUED into the proper |
Christoph Hellwig | 04b2fa9 | 2015-02-02 14:49:06 +0100 | [diff] [blame] | 1087 | * return code that the caller will hand to ->complete(). |
Zach Brown | 5eb6c7a | 2006-12-10 02:21:07 -0800 | [diff] [blame] | 1088 | * |
| 1089 | * This is managed by the bio_lock instead of being an atomic_t so that |
| 1090 | * completion paths can drop their ref and use the remaining count to |
| 1091 | * decide to wake the submission path atomically. |
Zach Brown | 8459d86 | 2006-12-10 02:21:05 -0800 | [diff] [blame] | 1092 | */ |
Zach Brown | 5eb6c7a | 2006-12-10 02:21:07 -0800 | [diff] [blame] | 1093 | spin_lock_irqsave(&dio->bio_lock, flags); |
| 1094 | ret2 = --dio->refcount; |
| 1095 | spin_unlock_irqrestore(&dio->bio_lock, flags); |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1096 | return ret2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | } |
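| |
| | /*
| |  * For contrast, the completion side (dio_bio_end_aio(), earlier in this
| |  * file) pairs with drop_refcount() as roughly the following pattern,
| |  * a sketch of the idea rather than the verbatim code:
| |  *
| |  * spin_lock_irqsave(&dio->bio_lock, flags);
| |  * remaining = --dio->refcount;
| |  * if (remaining == 1 && dio->waiter)
| |  * wake_up_process(dio->waiter);
| |  * spin_unlock_irqrestore(&dio->bio_lock, flags);
| |  *
| |  * Holding bio_lock makes "drop the ref" and "observe the remaining count"
| |  * a single atomic step, which is what the wakeup decision relies on.
| |  */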
| 1098 | |
Christoph Hellwig | eafdc7d | 2010-06-04 11:29:53 +0200 | [diff] [blame] | 1099 | /* |
| 1100 | * This is a library function for use by filesystem drivers. |
| 1101 | * |
| 1102 | * The locking rules are governed by the flags parameter: |
| 1103 | * - if the flags value contains DIO_LOCKING we use a fancy locking |
| 1104 | * scheme for dumb filesystems. |
| 1105 | * For writes this function is called under i_mutex and returns with |
| 1106 | * i_mutex held; for reads, i_mutex is not held on entry, but it is
| 1107 | * taken and dropped again before returning. |
Christoph Hellwig | eafdc7d | 2010-06-04 11:29:53 +0200 | [diff] [blame] | 1108 | * - if the flags value does NOT contain DIO_LOCKING we don't use any |
| 1109 | * internal locking but rather rely on the filesystem to synchronize |
| 1110 | * direct I/O reads/writes versus each other and truncate. |
Christoph Hellwig | df2d6f2 | 2011-06-24 14:29:46 -0400 | [diff] [blame] | 1111 | * |
| 1112 | * To help with locking against truncate we increment the i_dio_count
| 1113 | * counter before starting direct I/O and decrement it once we are done.
| 1114 | * Truncate can wait for it to reach zero to provide exclusion. It is
| 1115 | * expected that filesystems provide exclusion between new direct I/O
| 1116 | * and truncates. For DIO_LOCKING filesystems this is done by i_mutex, |
| 1117 | * but other filesystems need to take care of this on their own. |
Andi Kleen | ba253fb | 2011-08-01 21:38:08 -0700 | [diff] [blame] | 1118 | * |
| 1119 | * NOTE: if you pass "sdio" to anything by pointer make sure that function |
| 1120 | * is always inlined. Otherwise gcc is unable to split the structure into |
| 1121 | * individual fields and will generate much worse code. This is important |
| 1122 | * for the whole file. |
Christoph Hellwig | eafdc7d | 2010-06-04 11:29:53 +0200 | [diff] [blame] | 1123 | */ |
Andi Kleen | 65dd2aa | 2012-01-12 17:20:35 -0800 | [diff] [blame] | 1124 | static inline ssize_t |
Omar Sandoval | 17f8c84 | 2015-03-16 04:33:50 -0700 | [diff] [blame] | 1125 | do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, |
| 1126 | struct block_device *bdev, struct iov_iter *iter, |
Christoph Hellwig | c8b8e32 | 2016-04-07 08:51:58 -0700 | [diff] [blame] | 1127 | get_block_t get_block, dio_iodone_t end_io, |
Omar Sandoval | 17f8c84 | 2015-03-16 04:33:50 -0700 | [diff] [blame] | 1128 | dio_submit_t submit_io, int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | { |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 1130 | unsigned i_blkbits = READ_ONCE(inode->i_blkbits); |
Linus Torvalds | ab73857 | 2012-11-29 12:27:00 -0800 | [diff] [blame] | 1131 | unsigned blkbits = i_blkbits; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | unsigned blocksize_mask = (1 << blkbits) - 1; |
| 1133 | ssize_t retval = -EINVAL; |
Nikolay Borisov | 1c0ff0f | 2018-04-05 16:24:36 -0700 | [diff] [blame] | 1134 | const size_t count = iov_iter_count(iter); |
Christoph Hellwig | c8b8e32 | 2016-04-07 08:51:58 -0700 | [diff] [blame] | 1135 | loff_t offset = iocb->ki_pos; |
Nikolay Borisov | 1c0ff0f | 2018-04-05 16:24:36 -0700 | [diff] [blame] | 1136 | const loff_t end = offset + count; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1137 | struct dio *dio; |
Andi Kleen | eb28be2 | 2011-08-01 21:38:03 -0700 | [diff] [blame] | 1138 | struct dio_submit sdio = { 0, }; |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1139 | struct buffer_head map_bh = { 0, }; |
Fengguang Wu | 647d1e4 | 2012-08-09 15:23:09 +0200 | [diff] [blame] | 1140 | struct blk_plug plug; |
Al Viro | 886a391 | 2014-03-05 13:50:45 -0500 | [diff] [blame] | 1141 | unsigned long align = offset | iov_iter_alignment(iter); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | |
Andi Kleen | 65dd2aa | 2012-01-12 17:20:35 -0800 | [diff] [blame] | 1143 | /* |
| 1144 | * Avoid references to bdev if not absolutely needed, to give
| 1145 | * the early prefetch in the caller enough time. |
| 1146 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | |
Christoph Hellwig | f9b5570 | 2011-06-24 14:29:42 -0400 | [diff] [blame] | 1148 | /* watch out for a 0 len io from a tricksy fs */ |
Nikolay Borisov | 1c0ff0f | 2018-04-05 16:24:36 -0700 | [diff] [blame] | 1149 | if (iov_iter_rw(iter) == READ && !count) |
Christoph Hellwig | f9b5570 | 2011-06-24 14:29:42 -0400 | [diff] [blame] | 1150 | return 0; |
| 1151 | |
Andi Kleen | 6e8267f | 2011-08-01 21:38:06 -0700 | [diff] [blame] | 1152 | dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1153 | if (!dio) |
Gabriel Krisman Bertazi | 46d7160 | 2020-10-08 02:26:18 -0400 | [diff] [blame] | 1154 | return -ENOMEM; |
Jeff Moyer | 23aee09 | 2009-12-15 16:47:49 -0800 | [diff] [blame] | 1155 | /* |
| 1156 | * Believe it or not, zeroing out the page array caused a 0.5%
| 1157 | * performance regression in a database benchmark. So, we take |
| 1158 | * care to only zero out what's needed. |
| 1159 | */ |
| 1160 | memset(dio, 0, offsetof(struct dio, pages)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | |
Christoph Hellwig | 5fe878ae | 2009-12-15 16:47:50 -0800 | [diff] [blame] | 1162 | dio->flags = flags; |
Gabriel Krisman Bertazi | 0a9164c | 2020-10-08 02:26:19 -0400 | [diff] [blame] | 1163 | if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { |
| 1164 | /* will be released again before this function returns */
| 1165 | inode_lock(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1166 | } |
| 1167 | |
Jan Kara | 74cedf9 | 2015-11-30 10:15:42 -0700 | [diff] [blame] | 1168 | /* Once we have sampled i_size, check for reads beyond EOF */
| 1169 | dio->i_size = i_size_read(inode); |
| 1170 | if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { |
Al Viro | 2d4594a | 2015-12-08 12:22:47 -0500 | [diff] [blame] | 1171 | retval = 0; |
Gabriel Krisman Bertazi | 46d7160 | 2020-10-08 02:26:18 -0400 | [diff] [blame] | 1172 | goto fail_dio; |
Jan Kara | 74cedf9 | 2015-11-30 10:15:42 -0700 | [diff] [blame] | 1173 | } |
| 1174 | |
Gabriel Krisman Bertazi | 41b21af | 2020-10-08 02:26:20 -0400 | [diff] [blame] | 1175 | if (align & blocksize_mask) { |
| 1176 | if (bdev) |
| 1177 | blkbits = blksize_bits(bdev_logical_block_size(bdev)); |
| 1178 | blocksize_mask = (1 << blkbits) - 1; |
| 1179 | if (align & blocksize_mask) |
| 1180 | goto fail_dio; |
| 1181 | } |
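| |
| | /*
| |  * For example, a 512-byte-aligned request against a 4096-byte-blocksize
| |  * filesystem on a 512-byte-sector disk passes the check above: blkbits
| |  * drops to 9, and sdio.blkfactor (set below) becomes 12 - 9 == 3, so each
| |  * fs block is assembled from eight dio blocks.
| |  */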
| 1182 | |
Gabriel Krisman Bertazi | 0a9164c | 2020-10-08 02:26:19 -0400 | [diff] [blame] | 1183 | if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { |
| 1184 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
| 1185 | |
| 1186 | retval = filemap_write_and_wait_range(mapping, offset, end - 1); |
| 1187 | if (retval) |
| 1188 | goto fail_dio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | } |
| 1190 | |
| 1191 | /* |
Christoph Hellwig | 6039257 | 2014-02-10 10:27:11 +1100 | [diff] [blame] | 1192 | * For file-extending writes, updating i_size before data writeouts
| 1193 | * complete can expose uninitialized blocks in dumb filesystems. |
| 1194 | * In that case we need to wait for I/O completion even if asked |
| 1195 | * for an asynchronous write. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 | */ |
Christoph Hellwig | 6039257 | 2014-02-10 10:27:11 +1100 | [diff] [blame] | 1197 | if (is_sync_kiocb(iocb)) |
| 1198 | dio->is_async = false; |
Nikolay Borisov | c8f4c36 | 2018-02-23 13:45:28 +0200 | [diff] [blame] | 1199 | else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) |
Christoph Hellwig | 6039257 | 2014-02-10 10:27:11 +1100 | [diff] [blame] | 1200 | dio->is_async = false; |
| 1201 | else |
| 1202 | dio->is_async = true; |
| 1203 | |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1204 | dio->inode = inode; |
Mike Christie | 8a4c1e4 | 2016-06-05 14:31:50 -0500 | [diff] [blame] | 1205 | if (iov_iter_rw(iter) == WRITE) { |
| 1206 | dio->op = REQ_OP_WRITE; |
Christoph Hellwig | 70fd761 | 2016-11-01 07:40:10 -0600 | [diff] [blame] | 1207 | dio->op_flags = REQ_SYNC | REQ_IDLE; |
Goldwyn Rodrigues | 03a07c9 | 2017-06-20 07:05:46 -0500 | [diff] [blame] | 1208 | if (iocb->ki_flags & IOCB_NOWAIT) |
| 1209 | dio->op_flags |= REQ_NOWAIT; |
Mike Christie | 8a4c1e4 | 2016-06-05 14:31:50 -0500 | [diff] [blame] | 1210 | } else { |
| 1211 | dio->op = REQ_OP_READ; |
| 1212 | } |
Christoph Hellwig | 02afc27 | 2013-09-04 15:04:40 +0200 | [diff] [blame] | 1213 | |
| 1214 | /* |
| 1215 | * For AIO O_(D)SYNC writes we need to defer completions to a workqueue |
| 1216 | * so that we can call ->fsync. |
| 1217 | */ |
Lukas Czerner | 332391a | 2017-09-21 08:16:29 -0600 | [diff] [blame] | 1218 | if (dio->is_async && iov_iter_rw(iter) == WRITE) { |
| 1219 | retval = 0; |
Jan Kara | d9c10e5 | 2018-02-26 12:51:43 +0100 | [diff] [blame] | 1220 | if (iocb->ki_flags & IOCB_DSYNC) |
Lukas Czerner | 332391a | 2017-09-21 08:16:29 -0600 | [diff] [blame] | 1221 | retval = dio_set_defer_completion(dio); |
| 1222 | else if (!dio->inode->i_sb->s_dio_done_wq) { |
| 1223 | /* |
| 1224 | * In case of AIO write racing with buffered read we |
| 1225 | * need to defer completion. We can't decide this now, |
| 1226 | * but the workqueue needs to be initialized here.
| 1227 | */ |
| 1228 | retval = sb_init_dio_done_wq(dio->inode->i_sb); |
| 1229 | } |
Gabriel Krisman Bertazi | 46d7160 | 2020-10-08 02:26:18 -0400 | [diff] [blame] | 1230 | if (retval) |
| 1231 | goto fail_dio; |
Christoph Hellwig | 02afc27 | 2013-09-04 15:04:40 +0200 | [diff] [blame] | 1232 | } |
| 1233 | |
| 1234 | /* |
| 1235 | * Will be decremented at I/O completion time. |
| 1236 | */ |
Nikolay Borisov | ce3077e | 2018-02-23 13:45:29 +0200 | [diff] [blame] | 1237 | inode_dio_begin(inode); |
Christoph Hellwig | 02afc27 | 2013-09-04 15:04:40 +0200 | [diff] [blame] | 1238 | |
| 1239 | retval = 0; |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1240 | sdio.blkbits = blkbits; |
Linus Torvalds | ab73857 | 2012-11-29 12:27:00 -0800 | [diff] [blame] | 1241 | sdio.blkfactor = i_blkbits - blkbits; |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1242 | sdio.block_in_file = offset >> blkbits; |
| 1243 | |
| 1244 | sdio.get_block = get_block; |
| 1245 | dio->end_io = end_io; |
| 1246 | sdio.submit_io = submit_io; |
| 1247 | sdio.final_block_in_bio = -1; |
| 1248 | sdio.next_block_for_io = -1; |
| 1249 | |
| 1250 | dio->iocb = iocb; |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1251 | |
| 1252 | spin_lock_init(&dio->bio_lock); |
| 1253 | dio->refcount = 1; |
| 1254 | |
David Howells | 00e2370 | 2018-10-22 13:07:28 +0100 | [diff] [blame] | 1255 | dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ; |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1256 | sdio.iter = iter; |
Nikolay Borisov | 1c0ff0f | 2018-04-05 16:24:36 -0700 | [diff] [blame] | 1257 | sdio.final_block_in_request = end >> blkbits; |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1258 | |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1259 | /* |
| 1260 | * In case of non-aligned buffers, we may need 2 more |
| 1261 | * pages since we need to zero out the first and last blocks.
| 1262 | */ |
| 1263 | if (unlikely(sdio.blkfactor)) |
| 1264 | sdio.pages_in_io = 2; |
| 1265 | |
Al Viro | f67da30 | 2014-03-19 01:16:16 -0400 | [diff] [blame] | 1266 | sdio.pages_in_io += iov_iter_npages(iter, INT_MAX); |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1267 | |
Fengguang Wu | 647d1e4 | 2012-08-09 15:23:09 +0200 | [diff] [blame] | 1268 | blk_start_plug(&plug); |
| 1269 | |
Al Viro | 7b2c99d | 2014-03-15 04:05:57 -0400 | [diff] [blame] | 1270 | retval = do_direct_IO(dio, &sdio, &map_bh); |
| 1271 | if (retval) |
| 1272 | dio_cleanup(dio, &sdio); |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1273 | |
| 1274 | if (retval == -ENOTBLK) { |
| 1275 | /* |
| 1276 | * The remaining part of the request will be |
Randy Dunlap | 3d742d4 | 2021-02-24 12:00:48 -0800 | [diff] [blame] | 1277 | * handled by buffered I/O when we return.
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1278 | */ |
| 1279 | retval = 0; |
| 1280 | } |
| 1281 | /* |
| 1282 | * There may be some unwritten disk at the end of a part-written |
| 1283 | * fs-block-sized block. Go zero that now. |
| 1284 | */ |
| 1285 | dio_zero_block(dio, &sdio, 1, &map_bh); |
| 1286 | |
| 1287 | if (sdio.cur_page) { |
| 1288 | ssize_t ret2; |
| 1289 | |
| 1290 | ret2 = dio_send_cur_page(dio, &sdio, &map_bh); |
| 1291 | if (retval == 0) |
| 1292 | retval = ret2; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1293 | put_page(sdio.cur_page); |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1294 | sdio.cur_page = NULL; |
| 1295 | } |
| 1296 | if (sdio.bio) |
| 1297 | dio_bio_submit(dio, &sdio); |
| 1298 | |
Fengguang Wu | 647d1e4 | 2012-08-09 15:23:09 +0200 | [diff] [blame] | 1299 | blk_finish_plug(&plug); |
| 1300 | |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1301 | /* |
| 1302 | * It is possible that we return a short IO due to end of file.
| 1303 | * In that case, we need to release all the pages we got hold of.
| 1304 | */ |
| 1305 | dio_cleanup(dio, &sdio); |
| 1306 | |
| 1307 | /* |
| 1308 | * All block lookups have been performed. For READ requests |
| 1309 | * we can let i_mutex go now that it has achieved its purpose
| 1310 | * of protecting us from looking up uninitialized blocks. |
| 1311 | */ |
Omar Sandoval | 17f8c84 | 2015-03-16 04:33:50 -0700 | [diff] [blame] | 1312 | if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 1313 | inode_unlock(dio->inode); |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1314 | |
| 1315 | /* |
| 1316 | * The only time we want to leave bios in flight is when a successful |
| 1317 | * partial aio read or full aio write has been set up. In that case
| 1318 | * bio completion will call aio_complete. The only time it's safe to |
| 1319 | * call aio_complete is when we return -EIOCBQUEUED, so we key on that. |
| 1320 | * This had *better* be the only place that raises -EIOCBQUEUED. |
| 1321 | */ |
| 1322 | BUG_ON(retval == -EIOCBQUEUED); |
| 1323 | if (dio->is_async && retval == 0 && dio->result && |
Omar Sandoval | 17f8c84 | 2015-03-16 04:33:50 -0700 | [diff] [blame] | 1324 | (iov_iter_rw(iter) == READ || dio->result == count)) |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1325 | retval = -EIOCBQUEUED; |
Christoph Hellwig | af43647 | 2014-07-30 07:18:48 -0400 | [diff] [blame] | 1326 | else |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1327 | dio_await_completion(dio); |
| 1328 | |
| 1329 | if (drop_refcount(dio) == 0) { |
Lukas Czerner | ffe51f0 | 2017-10-17 08:43:09 -0600 | [diff] [blame] | 1330 | retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE); |
Andi Kleen | 847cc63 | 2011-08-01 21:38:09 -0700 | [diff] [blame] | 1331 | } else |
| 1332 | BUG_ON(retval != -EIOCBQUEUED); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | |
Gabriel Krisman Bertazi | 46d7160 | 2020-10-08 02:26:18 -0400 | [diff] [blame] | 1334 | return retval; |
| 1335 | |
| 1336 | fail_dio: |
| 1337 | if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) |
| 1338 | inode_unlock(inode); |
| 1339 | |
| 1340 | kmem_cache_free(dio_cache, dio); |
npiggin@suse.de | 7bb46a6 | 2010-05-27 01:05:33 +1000 | [diff] [blame] | 1341 | return retval; |
| 1342 | } |
Andi Kleen | 65dd2aa | 2012-01-12 17:20:35 -0800 | [diff] [blame] | 1343 | |
Omar Sandoval | 17f8c84 | 2015-03-16 04:33:50 -0700 | [diff] [blame] | 1344 | ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, |
| 1345 | struct block_device *bdev, struct iov_iter *iter, |
Christoph Hellwig | c8b8e32 | 2016-04-07 08:51:58 -0700 | [diff] [blame] | 1346 | get_block_t get_block, |
Omar Sandoval | 17f8c84 | 2015-03-16 04:33:50 -0700 | [diff] [blame] | 1347 | dio_iodone_t end_io, dio_submit_t submit_io, |
| 1348 | int flags) |
Andi Kleen | 65dd2aa | 2012-01-12 17:20:35 -0800 | [diff] [blame] | 1349 | { |
| 1350 | /* |
| 1351 | * The block device state is needed in the end to finally |
| 1352 | * submit everything. Since it's likely to be cache cold,
| 1353 | * prefetch it here as the first thing to hide some of the
| 1354 | * latency. |
| 1355 | * |
| 1356 | * Attempt to prefetch the pieces we likely need later. |
| 1357 | */ |
| 1358 | prefetch(&bdev->bd_disk->part_tbl); |
Christoph Hellwig | e556f6b | 2020-06-26 10:01:56 +0200 | [diff] [blame] | 1359 | prefetch(bdev->bd_disk->queue); |
| 1360 | prefetch((char *)bdev->bd_disk->queue + SMP_CACHE_BYTES); |
Andi Kleen | 65dd2aa | 2012-01-12 17:20:35 -0800 | [diff] [blame] | 1361 | |
Christoph Hellwig | c8b8e32 | 2016-04-07 08:51:58 -0700 | [diff] [blame] | 1362 | return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block, |
Omar Sandoval | 17f8c84 | 2015-03-16 04:33:50 -0700 | [diff] [blame] | 1363 | end_io, submit_io, flags); |
Andi Kleen | 65dd2aa | 2012-01-12 17:20:35 -0800 | [diff] [blame] | 1364 | } |
| 1365 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | EXPORT_SYMBOL(__blockdev_direct_IO); |
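| |
| | /*
| |  * A sketch of a typical filesystem caller, assuming the usual
| |  * blockdev_direct_IO() wrapper from <linux/fs.h>, which supplies the
| |  * DIO_LOCKING | DIO_SKIP_HOLES flags.  example_direct_IO() is hypothetical
| |  * and reuses the example_get_block() sketched earlier:
| |  */
| | static ssize_t __maybe_unused example_direct_IO(struct kiocb *iocb,
| | struct iov_iter *iter)
| | {
| | struct inode *inode = file_inode(iocb->ki_filp);
| |
| | /* hand the whole request to the direct-io core */
| | return blockdev_direct_IO(iocb, inode, iter, example_get_block);
| | }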
Andi Kleen | 6e8267f | 2011-08-01 21:38:06 -0700 | [diff] [blame] | 1367 | |
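| | /*
| |  * The slab cache backs the struct dio allocated per request in
| |  * do_blockdev_direct_IO().  SLAB_PANIC means the kernel panics if the
| |  * cache cannot be created at boot, so dio_init() itself cannot fail.
| |  */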
| 1368 | static __init int dio_init(void) |
| 1369 | { |
| 1370 | dio_cache = KMEM_CACHE(dio, SLAB_PANIC); |
| 1371 | return 0; |
| 1372 | } |
| 1373 | module_init(dio_init) |