Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Shared application/kernel submission and completion ring pairs, for |
| 4 | * supporting fast/efficient IO. |
| 5 | * |
| 6 | * A note on the read/write ordering memory barriers that are matched between |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 7 | * the application and kernel side. |
| 8 | * |
| 9 | * After the application reads the CQ ring tail, it must use an |
| 10 | * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses |
| 11 | * before writing the tail (using smp_load_acquire to read the tail will |
| 12 | * do). It also needs a smp_mb() before updating CQ head (ordering the |
| 13 | * entry load(s) with the head store), pairing with an implicit barrier |
| 14 | * through a control-dependency in io_get_cqring (smp_store_release to |
| 15 | * store head will do). Failure to do so could lead to reading invalid |
| 16 | * CQ entries. |
| 17 | * |
| 18 | * Likewise, the application must use an appropriate smp_wmb() before |
| 19 | * writing the SQ tail (ordering SQ entry stores with the tail store), |
| 20 | * which pairs with smp_load_acquire in io_get_sqring (smp_store_release |
| 21 | * to store the tail will do). And it needs a barrier ordering the SQ |
| 22 | * head load before writing new SQ entries (smp_load_acquire to read |
| 23 | * head will do). |
| 24 | * |
| 25 | * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application |
| 26 | * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after* |
| 27 | * updating the SQ tail; a full memory barrier smp_mb() is needed |
| 28 | * between. |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 29 | * |
| 30 | * Also see the examples in the liburing library: |
| 31 | * |
| 32 | * git://git.kernel.dk/liburing |
| 33 | * |
| 34 | * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens |
| 35 |  * on data shared between the kernel and application. This is done both |
| 36 |  * for ordering purposes and to ensure that once a value is loaded from |
| 37 | * data that the application could potentially modify, it remains stable. |
| 38 | * |
| 39 | * Copyright (C) 2018-2019 Jens Axboe |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 40 | * Copyright (c) 2018-2019 Christoph Hellwig |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 41 | */ |
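Editor's illustration, not part of the kernel source: the barrier pairing described in the comment above has a userspace counterpart. The sketch below is hedged, it assumes the ring pointers have already been derived from the mmap'ed region (see the setup sketch further down), uses C11 atomics to stand in for the smp_* primitives, and the struct and helper names are hypothetical.

/* Hedged userspace sketch of the barrier pairing described above. */
#include <linux/io_uring.h>
#include <stdatomic.h>
#include <stdint.h>

struct app_sq {
	_Atomic uint32_t *ktail;	/* kernel-visible sq.tail */
	uint32_t *array;		/* sq_array of SQE indices */
	uint32_t ring_mask;
};

struct app_cq {
	_Atomic uint32_t *khead;	/* kernel-visible cq.head */
	_Atomic uint32_t *ktail;	/* kernel-visible cq.tail */
	struct io_uring_cqe *cqes;
	uint32_t ring_mask;
};

/* Publish one prepared SQE index: the index store must be visible before the
 * tail store, so use a release store (pairs with the acquire read of the SQ
 * tail on the kernel side). */
static void app_sq_push(struct app_sq *sq, uint32_t sqe_idx)
{
	uint32_t tail = atomic_load_explicit(sq->ktail, memory_order_relaxed);

	sq->array[tail & sq->ring_mask] = sqe_idx;
	atomic_store_explicit(sq->ktail, tail + 1, memory_order_release);
}

/* Reap one CQE: acquire-load the tail (pairs with the kernel's release store
 * of cq.tail in __io_commit_cqring()), copy the entry, then release-store the
 * new head so the entry loads are ordered before the head update. */
static int app_cq_pop(struct app_cq *cq, struct io_uring_cqe *out)
{
	uint32_t head = atomic_load_explicit(cq->khead, memory_order_relaxed);

	if (head == atomic_load_explicit(cq->ktail, memory_order_acquire))
		return 0;
	*out = cq->cqes[head & cq->ring_mask];
	atomic_store_explicit(cq->khead, head + 1, memory_order_release);
	return 1;
}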
| 42 | #include <linux/kernel.h> |
| 43 | #include <linux/init.h> |
| 44 | #include <linux/errno.h> |
| 45 | #include <linux/syscalls.h> |
| 46 | #include <linux/compat.h> |
| 47 | #include <linux/refcount.h> |
| 48 | #include <linux/uio.h> |
| 49 | |
| 50 | #include <linux/sched/signal.h> |
| 51 | #include <linux/fs.h> |
| 52 | #include <linux/file.h> |
| 53 | #include <linux/fdtable.h> |
| 54 | #include <linux/mm.h> |
| 55 | #include <linux/mman.h> |
| 56 | #include <linux/mmu_context.h> |
| 57 | #include <linux/percpu.h> |
| 58 | #include <linux/slab.h> |
| 59 | #include <linux/workqueue.h> |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 60 | #include <linux/kthread.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 61 | #include <linux/blkdev.h> |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 62 | #include <linux/bvec.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 63 | #include <linux/net.h> |
| 64 | #include <net/sock.h> |
| 65 | #include <net/af_unix.h> |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 66 | #include <net/scm.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 67 | #include <linux/anon_inodes.h> |
| 68 | #include <linux/sched/mm.h> |
| 69 | #include <linux/uaccess.h> |
| 70 | #include <linux/nospec.h> |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 71 | #include <linux/sizes.h> |
| 72 | #include <linux/hugetlb.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 73 | |
| 74 | #include <uapi/linux/io_uring.h> |
| 75 | |
| 76 | #include "internal.h" |
| 77 | |
Daniel Xu | 5277dea | 2019-09-14 14:23:45 -0700 | [diff] [blame] | 78 | #define IORING_MAX_ENTRIES 32768 |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 79 | #define IORING_MAX_FIXED_FILES 1024 |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 80 | |
| 81 | struct io_uring { |
| 82 | u32 head ____cacheline_aligned_in_smp; |
| 83 | u32 tail ____cacheline_aligned_in_smp; |
| 84 | }; |
| 85 | |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 86 | /* |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 87 | * This data is shared with the application through the mmap at offsets |
| 88 | * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING. |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 89 | * |
| 90 | * The offsets to the member fields are published through struct |
| 91 | * io_sqring_offsets when calling io_uring_setup. |
| 92 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 93 | struct io_rings { |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 94 | /* |
| 95 | * Head and tail offsets into the ring; the offsets need to be |
| 96 | * masked to get valid indices. |
| 97 | * |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 98 |  * The kernel controls the head of the sq ring and the tail of the cq ring, |
| 99 |  * and the application controls the tail of the sq ring and the head of the |
| 100 | * cq ring. |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 101 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 102 | struct io_uring sq, cq; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 103 | /* |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 104 | * Bitmasks to apply to head and tail offsets (constant, equals |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 105 | * ring_entries - 1) |
| 106 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 107 | u32 sq_ring_mask, cq_ring_mask; |
| 108 | /* Ring sizes (constant, power of 2) */ |
| 109 | u32 sq_ring_entries, cq_ring_entries; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 110 | /* |
| 111 |  * Number of invalid entries dropped by the kernel due to an |
| 112 |  * invalid index stored in the array. |
| 113 | * |
| 114 | * Written by the kernel, shouldn't be modified by the |
| 115 | * application (i.e. get number of "new events" by comparing to |
| 116 | * cached value). |
| 117 | * |
| 118 |  * After a new SQ head value has been read by the application, this |
| 119 |  * counter includes all submissions that were dropped before reaching |
| 120 |  * the new SQ head (and possibly more). |
| 121 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 122 | u32 sq_dropped; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 123 | /* |
| 124 | * Runtime flags |
| 125 | * |
| 126 | * Written by the kernel, shouldn't be modified by the |
| 127 | * application. |
| 128 | * |
| 129 | * The application needs a full memory barrier before checking |
| 130 | * for IORING_SQ_NEED_WAKEUP after updating the sq tail. |
| 131 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 132 | u32 sq_flags; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 133 | /* |
| 134 | * Number of completion events lost because the queue was full; |
| 135 | * this should be avoided by the application by making sure |
| 136 |  * there are not more requests pending than there is space in |
| 137 | * the completion queue. |
| 138 | * |
| 139 | * Written by the kernel, shouldn't be modified by the |
| 140 | * application (i.e. get number of "new events" by comparing to |
| 141 | * cached value). |
| 142 | * |
| 143 |  * As completion events come in out of order, this counter is not |
| 144 | * ordered with any other data. |
| 145 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 146 | u32 cq_overflow; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 147 | /* |
| 148 | * Ring buffer of completion events. |
| 149 | * |
| 150 | * The kernel writes completion events fresh every time they are |
| 151 | * produced, so the application is allowed to modify pending |
| 152 | * entries. |
| 153 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 154 | struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 155 | }; |
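Editor's illustration, not part of the kernel source: struct io_rings above is what the application sees through the IORING_OFF_SQ_RING and IORING_OFF_CQ_RING mappings, with field offsets published by io_uring_setup(2) in struct io_sqring_offsets/io_cqring_offsets. A minimal, hedged setup sketch (error handling trimmed, helper name hypothetical) might look like this; liburing's io_uring_queue_init() performs the same steps.

/* Hedged sketch: mapping the shared rings that struct io_rings backs. */
#include <linux/io_uring.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>

static int map_rings(unsigned entries, struct io_uring_params *p,
		     void **sq_ring, void **sqes, void **cq_ring)
{
	int fd;

	memset(p, 0, sizeof(*p));
	fd = syscall(__NR_io_uring_setup, entries, p);
	if (fd < 0)
		return -1;

	/* SQ ring header plus the array of SQE indices (sq_off.array). */
	*sq_ring = mmap(NULL, p->sq_off.array + p->sq_entries * sizeof(uint32_t),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			fd, IORING_OFF_SQ_RING);

	/* The SQEs themselves live in a separate mapping. */
	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		     fd, IORING_OFF_SQES);

	/* CQ ring header plus the CQE array (cq_off.cqes). */
	*cq_ring = mmap(NULL, p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			fd, IORING_OFF_CQ_RING);

	/* Individual fields are found via the published offsets, e.g. the SQ
	 * tail is (uint32_t *)((char *)*sq_ring + p->sq_off.tail). */
	return fd;
}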
| 156 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 157 | struct io_mapped_ubuf { |
| 158 | u64 ubuf; |
| 159 | size_t len; |
| 160 | struct bio_vec *bvec; |
| 161 | unsigned int nr_bvecs; |
| 162 | }; |
| 163 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 164 | struct async_list { |
| 165 | spinlock_t lock; |
| 166 | atomic_t cnt; |
| 167 | struct list_head list; |
| 168 | |
| 169 | struct file *file; |
Jens Axboe | 6d5d5ac | 2019-09-11 10:16:13 -0600 | [diff] [blame] | 170 | off_t io_start; |
Zhengyuan Liu | 9310a7ba | 2019-07-22 10:23:27 +0800 | [diff] [blame] | 171 | size_t io_len; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 172 | }; |
| 173 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 174 | struct io_ring_ctx { |
| 175 | struct { |
| 176 | struct percpu_ref refs; |
| 177 | } ____cacheline_aligned_in_smp; |
| 178 | |
| 179 | struct { |
| 180 | unsigned int flags; |
| 181 | bool compat; |
| 182 | bool account_mem; |
| 183 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 184 | /* |
| 185 | * Ring buffer of indices into array of io_uring_sqe, which is |
| 186 | * mmapped by the application using the IORING_OFF_SQES offset. |
| 187 | * |
| 188 | * This indirection could e.g. be used to assign fixed |
| 189 | * io_uring_sqe entries to operations and only submit them to |
| 190 | * the queue when needed. |
| 191 | * |
| 192 | * The kernel modifies neither the indices array nor the entries |
| 193 | * array. |
| 194 | */ |
| 195 | u32 *sq_array; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 196 | unsigned cached_sq_head; |
| 197 | unsigned sq_entries; |
| 198 | unsigned sq_mask; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 199 | unsigned sq_thread_idle; |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 200 | unsigned cached_sq_dropped; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 201 | struct io_uring_sqe *sq_sqes; |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 202 | |
| 203 | struct list_head defer_list; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 204 | struct list_head timeout_list; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 205 | } ____cacheline_aligned_in_smp; |
| 206 | |
| 207 | /* IO offload */ |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 208 | struct workqueue_struct *sqo_wq[2]; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 209 | struct task_struct *sqo_thread; /* if using sq thread polling */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 210 | struct mm_struct *sqo_mm; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 211 | wait_queue_head_t sqo_wait; |
Jackie Liu | a4c0b3d | 2019-07-08 13:41:12 +0800 | [diff] [blame] | 212 | struct completion sqo_thread_started; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 213 | |
| 214 | struct { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 215 | unsigned cached_cq_tail; |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 216 | atomic_t cached_cq_overflow; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 217 | unsigned cq_entries; |
| 218 | unsigned cq_mask; |
| 219 | struct wait_queue_head cq_wait; |
| 220 | struct fasync_struct *cq_fasync; |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 221 | struct eventfd_ctx *cq_ev_fd; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 222 | atomic_t cq_timeouts; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 223 | } ____cacheline_aligned_in_smp; |
| 224 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 225 | struct io_rings *rings; |
| 226 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 227 | /* |
| 228 | * If used, fixed file set. Writers must ensure that ->refs is dead, |
| 229 | * readers must ensure that ->refs is alive as long as the file* is |
| 230 | * used. Only updated through io_uring_register(2). |
| 231 | */ |
| 232 | struct file **user_files; |
| 233 | unsigned nr_user_files; |
| 234 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 235 | /* if used, fixed mapped user buffers */ |
| 236 | unsigned nr_user_bufs; |
| 237 | struct io_mapped_ubuf *user_bufs; |
| 238 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 239 | struct user_struct *user; |
| 240 | |
| 241 | struct completion ctx_done; |
| 242 | |
| 243 | struct { |
| 244 | struct mutex uring_lock; |
| 245 | wait_queue_head_t wait; |
| 246 | } ____cacheline_aligned_in_smp; |
| 247 | |
| 248 | struct { |
| 249 | spinlock_t completion_lock; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 250 | bool poll_multi_file; |
| 251 | /* |
| 252 | * ->poll_list is protected by the ctx->uring_lock for |
| 253 | * io_uring instances that don't use IORING_SETUP_SQPOLL. |
| 254 | * For SQPOLL, only the single threaded io_sq_thread() will |
| 255 | * manipulate the list, hence no extra locking is needed there. |
| 256 | */ |
| 257 | struct list_head poll_list; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 258 | struct list_head cancel_list; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 259 | } ____cacheline_aligned_in_smp; |
| 260 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 261 | struct async_list pending_async[2]; |
| 262 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 263 | #if defined(CONFIG_UNIX) |
| 264 | struct socket *ring_sock; |
| 265 | #endif |
| 266 | }; |
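Editor's illustration, not part of the kernel source: the fixed file set noted in the struct above (user_files/nr_user_files) is populated from userspace with io_uring_register(2). The helper below is a hypothetical sketch.

/* Hedged sketch: filling the ring's fixed file table via IORING_REGISTER_FILES. */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_files(int ring_fd, const int *fds, unsigned nr)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES, fds, nr);
}

/* A later SQE then refers to a slot index rather than a descriptor:
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	sqe->fd = 0;		// index into the registered table
 * which shows up kernel-side as REQ_F_FIXED_FILE. */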
| 267 | |
| 268 | struct sqe_submit { |
| 269 | const struct io_uring_sqe *sqe; |
| 270 | unsigned short index; |
Jackie Liu | 8776f3f | 2019-09-09 20:50:39 +0800 | [diff] [blame] | 271 | u32 sequence; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 272 | bool has_user; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 273 | bool needs_lock; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 274 | bool needs_fixed_file; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 275 | }; |
| 276 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 277 | /* |
| 278 | * First field must be the file pointer in all the |
| 279 | * iocb unions! See also 'struct kiocb' in <linux/fs.h> |
| 280 | */ |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 281 | struct io_poll_iocb { |
| 282 | struct file *file; |
| 283 | struct wait_queue_head *head; |
| 284 | __poll_t events; |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 285 | bool done; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 286 | bool canceled; |
| 287 | struct wait_queue_entry wait; |
| 288 | }; |
| 289 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 290 | struct io_timeout { |
| 291 | struct file *file; |
| 292 | struct hrtimer timer; |
| 293 | }; |
| 294 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 295 | /* |
| 296 | * NOTE! Each of the iocb union members has the file pointer |
| 297 | * as the first entry in their struct definition. So you can |
| 298 | * access the file pointer through any of the sub-structs, |
| 299 | * or directly as just 'ki_filp' in this struct. |
| 300 | */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 301 | struct io_kiocb { |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 302 | union { |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 303 | struct file *file; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 304 | struct kiocb rw; |
| 305 | struct io_poll_iocb poll; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 306 | struct io_timeout timeout; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 307 | }; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 308 | |
| 309 | struct sqe_submit submit; |
| 310 | |
| 311 | struct io_ring_ctx *ctx; |
| 312 | struct list_head list; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 313 | struct list_head link_list; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 314 | unsigned int flags; |
Jens Axboe | c16361c | 2019-01-17 08:39:48 -0700 | [diff] [blame] | 315 | refcount_t refs; |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 316 | #define REQ_F_NOWAIT 1 /* must not punt to workers */ |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 317 | #define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */ |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 318 | #define REQ_F_FIXED_FILE 4 /* ctx owns file */ |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 319 | #define REQ_F_SEQ_PREV 8 /* sequential with previous */ |
Stefan Bühler | e2033e3 | 2019-05-11 19:08:01 +0200 | [diff] [blame] | 320 | #define REQ_F_IO_DRAIN 16 /* drain existing IO first */ |
| 321 | #define REQ_F_IO_DRAINED 32 /* drain done */ |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 322 | #define REQ_F_LINK 64 /* linked sqes */ |
Zhengyuan Liu | f7b76ac | 2019-07-16 23:26:14 +0800 | [diff] [blame] | 323 | #define REQ_F_LINK_DONE 128 /* linked sqes done */ |
| 324 | #define REQ_F_FAIL_LINK 256 /* fail rest of links */ |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 325 | #define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */ |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 326 | #define REQ_F_TIMEOUT 1024 /* timeout request */ |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 327 | #define REQ_F_ISREG 2048 /* regular file */ |
| 328 | #define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 329 | u64 user_data; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 330 | u32 result; |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 331 | u32 sequence; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 332 | |
| 333 | struct work_struct work; |
| 334 | }; |
| 335 | |
| 336 | #define IO_PLUG_THRESHOLD 2 |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 337 | #define IO_IOPOLL_BATCH 8 |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 338 | |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 339 | struct io_submit_state { |
| 340 | struct blk_plug plug; |
| 341 | |
| 342 | /* |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 343 | * io_kiocb alloc cache |
| 344 | */ |
| 345 | void *reqs[IO_IOPOLL_BATCH]; |
| 346 | unsigned int free_reqs; |
| 347 | unsigned int cur_req; |
| 348 | |
| 349 | /* |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 350 | * File reference cache |
| 351 | */ |
| 352 | struct file *file; |
| 353 | unsigned int fd; |
| 354 | unsigned int has_refs; |
| 355 | unsigned int used_refs; |
| 356 | unsigned int ios_left; |
| 357 | }; |
| 358 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 359 | static void io_sq_wq_submit_work(struct work_struct *work); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 360 | static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, |
| 361 | long res); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 362 | static void __io_free_req(struct io_kiocb *req); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 363 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 364 | static struct kmem_cache *req_cachep; |
| 365 | |
| 366 | static const struct file_operations io_uring_fops; |
| 367 | |
| 368 | struct sock *io_uring_get_socket(struct file *file) |
| 369 | { |
| 370 | #if defined(CONFIG_UNIX) |
| 371 | if (file->f_op == &io_uring_fops) { |
| 372 | struct io_ring_ctx *ctx = file->private_data; |
| 373 | |
| 374 | return ctx->ring_sock->sk; |
| 375 | } |
| 376 | #endif |
| 377 | return NULL; |
| 378 | } |
| 379 | EXPORT_SYMBOL(io_uring_get_socket); |
| 380 | |
| 381 | static void io_ring_ctx_ref_free(struct percpu_ref *ref) |
| 382 | { |
| 383 | struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); |
| 384 | |
| 385 | complete(&ctx->ctx_done); |
| 386 | } |
| 387 | |
| 388 | static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) |
| 389 | { |
| 390 | struct io_ring_ctx *ctx; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 391 | int i; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 392 | |
| 393 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 394 | if (!ctx) |
| 395 | return NULL; |
| 396 | |
Roman Gushchin | 2148289 | 2019-05-07 10:01:48 -0700 | [diff] [blame] | 397 | if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, |
| 398 | PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 399 | kfree(ctx); |
| 400 | return NULL; |
| 401 | } |
| 402 | |
| 403 | ctx->flags = p->flags; |
| 404 | init_waitqueue_head(&ctx->cq_wait); |
| 405 | init_completion(&ctx->ctx_done); |
Jackie Liu | a4c0b3d | 2019-07-08 13:41:12 +0800 | [diff] [blame] | 406 | init_completion(&ctx->sqo_thread_started); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 407 | mutex_init(&ctx->uring_lock); |
| 408 | init_waitqueue_head(&ctx->wait); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 409 | for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) { |
| 410 | spin_lock_init(&ctx->pending_async[i].lock); |
| 411 | INIT_LIST_HEAD(&ctx->pending_async[i].list); |
| 412 | atomic_set(&ctx->pending_async[i].cnt, 0); |
| 413 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 414 | spin_lock_init(&ctx->completion_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 415 | INIT_LIST_HEAD(&ctx->poll_list); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 416 | INIT_LIST_HEAD(&ctx->cancel_list); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 417 | INIT_LIST_HEAD(&ctx->defer_list); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 418 | INIT_LIST_HEAD(&ctx->timeout_list); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 419 | return ctx; |
| 420 | } |
| 421 | |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 422 | static inline bool __io_sequence_defer(struct io_ring_ctx *ctx, |
| 423 | struct io_kiocb *req) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 424 | { |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 425 | return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped |
| 426 | + atomic_read(&ctx->cached_cq_overflow); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 427 | } |
| 428 | |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 429 | static inline bool io_sequence_defer(struct io_ring_ctx *ctx, |
| 430 | struct io_kiocb *req) |
| 431 | { |
| 432 | if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) |
| 433 | return false; |
| 434 | |
| 435 | return __io_sequence_defer(ctx, req); |
| 436 | } |
| 437 | |
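Editor's illustration, not part of the kernel source: the two helpers above hold back a request flagged REQ_F_IO_DRAIN until every earlier submission has produced a completion (or been dropped or overflowed). From userspace that behaviour is requested per SQE with IOSQE_IO_DRAIN; the helper below is a hypothetical, hedged sketch.

/* Hedged sketch: ask for drain semantics, e.g. an fsync that must run only
 * after all previously submitted writes have completed. */
#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>

static void prep_drained_fsync(struct io_uring_sqe *sqe, int fd, uint64_t tag)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->flags = IOSQE_IO_DRAIN;	/* kernel-side: REQ_F_IO_DRAIN */
	sqe->user_data = tag;
}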
| 438 | static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 439 | { |
| 440 | struct io_kiocb *req; |
| 441 | |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 442 | req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list); |
| 443 | if (req && !io_sequence_defer(ctx, req)) { |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 444 | list_del_init(&req->list); |
| 445 | return req; |
| 446 | } |
| 447 | |
| 448 | return NULL; |
| 449 | } |
| 450 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 451 | static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) |
| 452 | { |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 453 | struct io_kiocb *req; |
| 454 | |
| 455 | req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list); |
| 456 | if (req && !__io_sequence_defer(ctx, req)) { |
| 457 | list_del_init(&req->list); |
| 458 | return req; |
| 459 | } |
| 460 | |
| 461 | return NULL; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 462 | } |
| 463 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 464 | static void __io_commit_cqring(struct io_ring_ctx *ctx) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 465 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 466 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 467 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 468 | if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 469 | /* order cqe stores with ring update */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 470 | smp_store_release(&rings->cq.tail, ctx->cached_cq_tail); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 471 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 472 | if (wq_has_sleeper(&ctx->cq_wait)) { |
| 473 | wake_up_interruptible(&ctx->cq_wait); |
| 474 | kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN); |
| 475 | } |
| 476 | } |
| 477 | } |
| 478 | |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 479 | static inline void io_queue_async_work(struct io_ring_ctx *ctx, |
| 480 | struct io_kiocb *req) |
| 481 | { |
Jens Axboe | 6cc47d1 | 2019-09-18 11:18:23 -0600 | [diff] [blame] | 482 | int rw = 0; |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 483 | |
Jens Axboe | 6cc47d1 | 2019-09-18 11:18:23 -0600 | [diff] [blame] | 484 | if (req->submit.sqe) { |
| 485 | switch (req->submit.sqe->opcode) { |
| 486 | case IORING_OP_WRITEV: |
| 487 | case IORING_OP_WRITE_FIXED: |
| 488 | rw = !(req->rw.ki_flags & IOCB_DIRECT); |
| 489 | break; |
| 490 | } |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 491 | } |
| 492 | |
| 493 | queue_work(ctx->sqo_wq[rw], &req->work); |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 494 | } |
| 495 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 496 | static void io_kill_timeout(struct io_kiocb *req) |
| 497 | { |
| 498 | int ret; |
| 499 | |
| 500 | ret = hrtimer_try_to_cancel(&req->timeout.timer); |
| 501 | if (ret != -1) { |
| 502 | atomic_inc(&req->ctx->cq_timeouts); |
| 503 | list_del(&req->list); |
| 504 | io_cqring_fill_event(req->ctx, req->user_data, 0); |
| 505 | __io_free_req(req); |
| 506 | } |
| 507 | } |
| 508 | |
| 509 | static void io_kill_timeouts(struct io_ring_ctx *ctx) |
| 510 | { |
| 511 | struct io_kiocb *req, *tmp; |
| 512 | |
| 513 | spin_lock_irq(&ctx->completion_lock); |
| 514 | list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list) |
| 515 | io_kill_timeout(req); |
| 516 | spin_unlock_irq(&ctx->completion_lock); |
| 517 | } |
| 518 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 519 | static void io_commit_cqring(struct io_ring_ctx *ctx) |
| 520 | { |
| 521 | struct io_kiocb *req; |
| 522 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 523 | while ((req = io_get_timeout_req(ctx)) != NULL) |
| 524 | io_kill_timeout(req); |
| 525 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 526 | __io_commit_cqring(ctx); |
| 527 | |
| 528 | while ((req = io_get_deferred_req(ctx)) != NULL) { |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 529 | if (req->flags & REQ_F_SHADOW_DRAIN) { |
| 530 | /* Just for drain, free it. */ |
| 531 | __io_free_req(req); |
| 532 | continue; |
| 533 | } |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 534 | req->flags |= REQ_F_IO_DRAINED; |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 535 | io_queue_async_work(ctx, req); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 536 | } |
| 537 | } |
| 538 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 539 | static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) |
| 540 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 541 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 542 | unsigned tail; |
| 543 | |
| 544 | tail = ctx->cached_cq_tail; |
Stefan Bühler | 115e12e | 2019-04-24 23:54:18 +0200 | [diff] [blame] | 545 | /* |
| 546 | * writes to the cq entry need to come after reading head; the |
| 547 | * control dependency is enough as we're using WRITE_ONCE to |
| 548 | * fill the cq entry |
| 549 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 550 | if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 551 | return NULL; |
| 552 | |
| 553 | ctx->cached_cq_tail++; |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 554 | return &rings->cqes[tail & ctx->cq_mask]; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 555 | } |
| 556 | |
| 557 | static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 558 | long res) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 559 | { |
| 560 | struct io_uring_cqe *cqe; |
| 561 | |
| 562 | /* |
| 563 | * If we can't get a cq entry, userspace overflowed the |
| 564 | * submission (by quite a lot). Increment the overflow count in |
| 565 | * the ring. |
| 566 | */ |
| 567 | cqe = io_get_cqring(ctx); |
| 568 | if (cqe) { |
| 569 | WRITE_ONCE(cqe->user_data, ki_user_data); |
| 570 | WRITE_ONCE(cqe->res, res); |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 571 | WRITE_ONCE(cqe->flags, 0); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 572 | } else { |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 573 | WRITE_ONCE(ctx->rings->cq_overflow, |
| 574 | atomic_inc_return(&ctx->cached_cq_overflow)); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 575 | } |
| 576 | } |
| 577 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 578 | static void io_cqring_ev_posted(struct io_ring_ctx *ctx) |
| 579 | { |
| 580 | if (waitqueue_active(&ctx->wait)) |
| 581 | wake_up(&ctx->wait); |
| 582 | if (waitqueue_active(&ctx->sqo_wait)) |
| 583 | wake_up(&ctx->sqo_wait); |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 584 | if (ctx->cq_ev_fd) |
| 585 | eventfd_signal(ctx->cq_ev_fd, 1); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 586 | } |
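Editor's illustration, not part of the kernel source: io_cqring_ev_posted() above signals an eventfd if one was registered. An application arms that path roughly as follows; the helper name is hypothetical and error handling is trimmed.

/* Hedged sketch: completions can then be waited for with read()/poll() on
 * the eventfd instead of calling io_uring_enter(). */
#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

static int arm_cq_eventfd(int ring_fd)
{
	int efd = eventfd(0, 0);

	if (efd < 0)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0)
		return -1;
	return efd;	/* read() on this blocks until a CQE is posted */
}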
| 587 | |
| 588 | static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data, |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 589 | long res) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 590 | { |
| 591 | unsigned long flags; |
| 592 | |
| 593 | spin_lock_irqsave(&ctx->completion_lock, flags); |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 594 | io_cqring_fill_event(ctx, user_data, res); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 595 | io_commit_cqring(ctx); |
| 596 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 597 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 598 | io_cqring_ev_posted(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 599 | } |
| 600 | |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 601 | static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, |
| 602 | struct io_submit_state *state) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 603 | { |
Jens Axboe | fd6fab2 | 2019-03-14 16:30:06 -0600 | [diff] [blame] | 604 | gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 605 | struct io_kiocb *req; |
| 606 | |
| 607 | if (!percpu_ref_tryget(&ctx->refs)) |
| 608 | return NULL; |
| 609 | |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 610 | if (!state) { |
Jens Axboe | fd6fab2 | 2019-03-14 16:30:06 -0600 | [diff] [blame] | 611 | req = kmem_cache_alloc(req_cachep, gfp); |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 612 | if (unlikely(!req)) |
| 613 | goto out; |
| 614 | } else if (!state->free_reqs) { |
| 615 | size_t sz; |
| 616 | int ret; |
| 617 | |
| 618 | sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs)); |
Jens Axboe | fd6fab2 | 2019-03-14 16:30:06 -0600 | [diff] [blame] | 619 | ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs); |
| 620 | |
| 621 | /* |
| 622 | * Bulk alloc is all-or-nothing. If we fail to get a batch, |
| 623 | * retry single alloc to be on the safe side. |
| 624 | */ |
| 625 | if (unlikely(ret <= 0)) { |
| 626 | state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); |
| 627 | if (!state->reqs[0]) |
| 628 | goto out; |
| 629 | ret = 1; |
| 630 | } |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 631 | state->free_reqs = ret - 1; |
| 632 | state->cur_req = 1; |
| 633 | req = state->reqs[0]; |
| 634 | } else { |
| 635 | req = state->reqs[state->cur_req]; |
| 636 | state->free_reqs--; |
| 637 | state->cur_req++; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 638 | } |
| 639 | |
Jens Axboe | 60c112b | 2019-06-21 10:20:18 -0600 | [diff] [blame] | 640 | req->file = NULL; |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 641 | req->ctx = ctx; |
| 642 | req->flags = 0; |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 643 | /* one is dropped after submission, the other at completion */ |
| 644 | refcount_set(&req->refs, 2); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 645 | req->result = 0; |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 646 | return req; |
| 647 | out: |
Pavel Begunkov | 6805b32 | 2019-10-08 02:18:42 +0300 | [diff] [blame] | 648 | percpu_ref_put(&ctx->refs); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 649 | return NULL; |
| 650 | } |
| 651 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 652 | static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr) |
| 653 | { |
| 654 | if (*nr) { |
| 655 | kmem_cache_free_bulk(req_cachep, *nr, reqs); |
Pavel Begunkov | 6805b32 | 2019-10-08 02:18:42 +0300 | [diff] [blame] | 656 | percpu_ref_put_many(&ctx->refs, *nr); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 657 | *nr = 0; |
| 658 | } |
| 659 | } |
| 660 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 661 | static void __io_free_req(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 662 | { |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 663 | if (req->file && !(req->flags & REQ_F_FIXED_FILE)) |
| 664 | fput(req->file); |
Pavel Begunkov | 6805b32 | 2019-10-08 02:18:42 +0300 | [diff] [blame] | 665 | percpu_ref_put(&req->ctx->refs); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 666 | kmem_cache_free(req_cachep, req); |
| 667 | } |
| 668 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 669 | static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 670 | { |
| 671 | struct io_kiocb *nxt; |
| 672 | |
| 673 | /* |
| 674 |  * The list should never be empty when we are called here. But it could |
| 675 |  * potentially happen if the chain is messed up, so check to be on the |
| 676 |  * safe side. |
| 677 | */ |
| 678 | nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list); |
| 679 | if (nxt) { |
| 680 | list_del(&nxt->list); |
| 681 | if (!list_empty(&req->link_list)) { |
| 682 | INIT_LIST_HEAD(&nxt->link_list); |
| 683 | list_splice(&req->link_list, &nxt->link_list); |
| 684 | nxt->flags |= REQ_F_LINK; |
| 685 | } |
| 686 | |
Zhengyuan Liu | f7b76ac | 2019-07-16 23:26:14 +0800 | [diff] [blame] | 687 | nxt->flags |= REQ_F_LINK_DONE; |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 688 | /* |
| 689 | * If we're in async work, we can continue processing the chain |
| 690 | * in this context instead of having to queue up new async work. |
| 691 | */ |
| 692 | if (nxtptr && current_work()) { |
| 693 | *nxtptr = nxt; |
| 694 | } else { |
| 695 | INIT_WORK(&nxt->work, io_sq_wq_submit_work); |
| 696 | io_queue_async_work(req->ctx, nxt); |
| 697 | } |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 698 | } |
| 699 | } |
| 700 | |
| 701 | /* |
| 702 | * Called if REQ_F_LINK is set, and we fail the head request |
| 703 | */ |
| 704 | static void io_fail_links(struct io_kiocb *req) |
| 705 | { |
| 706 | struct io_kiocb *link; |
| 707 | |
| 708 | while (!list_empty(&req->link_list)) { |
| 709 | link = list_first_entry(&req->link_list, struct io_kiocb, list); |
| 710 | list_del(&link->list); |
| 711 | |
| 712 | io_cqring_add_event(req->ctx, link->user_data, -ECANCELED); |
| 713 | __io_free_req(link); |
| 714 | } |
| 715 | } |
| 716 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 717 | static void io_free_req(struct io_kiocb *req, struct io_kiocb **nxt) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 718 | { |
| 719 | /* |
| 720 | * If LINK is set, we have dependent requests in this chain. If we |
| 721 | * didn't fail this request, queue the first one up, moving any other |
| 722 | * dependencies to the next request. In case of failure, fail the rest |
| 723 | * of the chain. |
| 724 | */ |
| 725 | if (req->flags & REQ_F_LINK) { |
| 726 | if (req->flags & REQ_F_FAIL_LINK) |
| 727 | io_fail_links(req); |
| 728 | else |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 729 | io_req_link_next(req, nxt); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 730 | } |
| 731 | |
| 732 | __io_free_req(req); |
| 733 | } |
| 734 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 735 | /* |
| 736 | * Drop reference to request, return next in chain (if there is one) if this |
| 737 | * was the last reference to this request. |
| 738 | */ |
| 739 | static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 740 | { |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 741 | struct io_kiocb *nxt = NULL; |
| 742 | |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 743 | if (refcount_dec_and_test(&req->refs)) |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 744 | io_free_req(req, &nxt); |
| 745 | |
| 746 | return nxt; |
| 747 | } |
| 748 | |
| 749 | static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr) |
| 750 | { |
| 751 | struct io_kiocb *nxt; |
| 752 | |
| 753 | nxt = io_put_req_find_next(req); |
| 754 | if (nxt) { |
| 755 | if (nxtptr) { |
| 756 | *nxtptr = nxt; |
| 757 | } else { |
| 758 | INIT_WORK(&nxt->work, io_sq_wq_submit_work); |
| 759 | io_queue_async_work(nxt->ctx, nxt); |
| 760 | } |
| 761 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 762 | } |
| 763 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 764 | static unsigned io_cqring_events(struct io_rings *rings) |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 765 | { |
| 766 | /* See comment at the top of this file */ |
| 767 | smp_rmb(); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 768 | return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head); |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 769 | } |
| 770 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 771 | static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) |
| 772 | { |
| 773 | struct io_rings *rings = ctx->rings; |
| 774 | |
| 775 | /* make sure SQ entry isn't read before tail */ |
| 776 | return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; |
| 777 | } |
| 778 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 779 | /* |
| 780 | * Find and free completed poll iocbs |
| 781 | */ |
| 782 | static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, |
| 783 | struct list_head *done) |
| 784 | { |
| 785 | void *reqs[IO_IOPOLL_BATCH]; |
| 786 | struct io_kiocb *req; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 787 | int to_free; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 788 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 789 | to_free = 0; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 790 | while (!list_empty(done)) { |
| 791 | req = list_first_entry(done, struct io_kiocb, list); |
| 792 | list_del(&req->list); |
| 793 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 794 | io_cqring_fill_event(ctx, req->user_data, req->result); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 795 | (*nr_events)++; |
| 796 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 797 | if (refcount_dec_and_test(&req->refs)) { |
| 798 | /* If we're not using fixed files, we have to pair the |
| 799 | * completion part with the file put. Use regular |
| 800 | * completions for those, only batch free for fixed |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 801 | * file and non-linked commands. |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 802 | */ |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 803 | if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) == |
| 804 | REQ_F_FIXED_FILE) { |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 805 | reqs[to_free++] = req; |
| 806 | if (to_free == ARRAY_SIZE(reqs)) |
| 807 | io_free_req_many(ctx, reqs, &to_free); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 808 | } else { |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 809 | io_free_req(req, NULL); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 810 | } |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 811 | } |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 812 | } |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 813 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 814 | io_commit_cqring(ctx); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 815 | io_free_req_many(ctx, reqs, &to_free); |
| 816 | } |
| 817 | |
| 818 | static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, |
| 819 | long min) |
| 820 | { |
| 821 | struct io_kiocb *req, *tmp; |
| 822 | LIST_HEAD(done); |
| 823 | bool spin; |
| 824 | int ret; |
| 825 | |
| 826 | /* |
| 827 | * Only spin for completions if we don't have multiple devices hanging |
| 828 | * off our complete list, and we're under the requested amount. |
| 829 | */ |
| 830 | spin = !ctx->poll_multi_file && *nr_events < min; |
| 831 | |
| 832 | ret = 0; |
| 833 | list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) { |
| 834 | struct kiocb *kiocb = &req->rw; |
| 835 | |
| 836 | /* |
| 837 | * Move completed entries to our local list. If we find a |
| 838 | * request that requires polling, break out and complete |
| 839 | * the done list first, if we have entries there. |
| 840 | */ |
| 841 | if (req->flags & REQ_F_IOPOLL_COMPLETED) { |
| 842 | list_move_tail(&req->list, &done); |
| 843 | continue; |
| 844 | } |
| 845 | if (!list_empty(&done)) |
| 846 | break; |
| 847 | |
| 848 | ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); |
| 849 | if (ret < 0) |
| 850 | break; |
| 851 | |
| 852 | if (ret && spin) |
| 853 | spin = false; |
| 854 | ret = 0; |
| 855 | } |
| 856 | |
| 857 | if (!list_empty(&done)) |
| 858 | io_iopoll_complete(ctx, nr_events, &done); |
| 859 | |
| 860 | return ret; |
| 861 | } |
| 862 | |
| 863 | /* |
| 864 |  * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a |
| 865 | * non-spinning poll check - we'll still enter the driver poll loop, but only |
| 866 | * as a non-spinning completion check. |
| 867 | */ |
| 868 | static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, |
| 869 | long min) |
| 870 | { |
Jens Axboe | 08f5439 | 2019-08-21 22:19:11 -0600 | [diff] [blame] | 871 | while (!list_empty(&ctx->poll_list) && !need_resched()) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 872 | int ret; |
| 873 | |
| 874 | ret = io_do_iopoll(ctx, nr_events, min); |
| 875 | if (ret < 0) |
| 876 | return ret; |
| 877 | if (!min || *nr_events >= min) |
| 878 | return 0; |
| 879 | } |
| 880 | |
| 881 | return 1; |
| 882 | } |
| 883 | |
| 884 | /* |
| 885 | * We can't just wait for polled events to come to us, we have to actively |
| 886 | * find and complete them. |
| 887 | */ |
| 888 | static void io_iopoll_reap_events(struct io_ring_ctx *ctx) |
| 889 | { |
| 890 | if (!(ctx->flags & IORING_SETUP_IOPOLL)) |
| 891 | return; |
| 892 | |
| 893 | mutex_lock(&ctx->uring_lock); |
| 894 | while (!list_empty(&ctx->poll_list)) { |
| 895 | unsigned int nr_events = 0; |
| 896 | |
| 897 | io_iopoll_getevents(ctx, &nr_events, 1); |
Jens Axboe | 08f5439 | 2019-08-21 22:19:11 -0600 | [diff] [blame] | 898 | |
| 899 | /* |
| 900 |  * Ensure we allow local-to-the-cpu processing to take place; |
| 901 |  * in this case we need to ensure that we reap all events. |
| 902 | */ |
| 903 | cond_resched(); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 904 | } |
| 905 | mutex_unlock(&ctx->uring_lock); |
| 906 | } |
| 907 | |
Jens Axboe | 2b2ed97 | 2019-10-25 10:06:15 -0600 | [diff] [blame] | 908 | static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, |
| 909 | long min) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 910 | { |
Jens Axboe | 2b2ed97 | 2019-10-25 10:06:15 -0600 | [diff] [blame] | 911 | int iters = 0, ret = 0; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 912 | |
| 913 | do { |
| 914 | int tmin = 0; |
| 915 | |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 916 | /* |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 917 | * Don't enter poll loop if we already have events pending. |
| 918 | * If we do, we can potentially be spinning for commands that |
| 919 | * already triggered a CQE (eg in error). |
| 920 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 921 | if (io_cqring_events(ctx->rings)) |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 922 | break; |
| 923 | |
| 924 | /* |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 925 | * If a submit got punted to a workqueue, we can have the |
| 926 | * application entering polling for a command before it gets |
| 927 | * issued. That app will hold the uring_lock for the duration |
| 928 | * of the poll right here, so we need to take a breather every |
| 929 | * now and then to ensure that the issue has a chance to add |
| 930 | * the poll to the issued list. Otherwise we can spin here |
| 931 | * forever, while the workqueue is stuck trying to acquire the |
| 932 | * very same mutex. |
| 933 | */ |
| 934 | if (!(++iters & 7)) { |
| 935 | mutex_unlock(&ctx->uring_lock); |
| 936 | mutex_lock(&ctx->uring_lock); |
| 937 | } |
| 938 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 939 | if (*nr_events < min) |
| 940 | tmin = min - *nr_events; |
| 941 | |
| 942 | ret = io_iopoll_getevents(ctx, nr_events, tmin); |
| 943 | if (ret <= 0) |
| 944 | break; |
| 945 | ret = 0; |
| 946 | } while (min && !*nr_events && !need_resched()); |
| 947 | |
Jens Axboe | 2b2ed97 | 2019-10-25 10:06:15 -0600 | [diff] [blame] | 948 | return ret; |
| 949 | } |
| 950 | |
| 951 | static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, |
| 952 | long min) |
| 953 | { |
| 954 | int ret; |
| 955 | |
| 956 | /* |
| 957 | * We disallow the app entering submit/complete with polling, but we |
| 958 | * still need to lock the ring to prevent racing with polled issue |
| 959 | * that got punted to a workqueue. |
| 960 | */ |
| 961 | mutex_lock(&ctx->uring_lock); |
| 962 | ret = __io_iopoll_check(ctx, nr_events, min); |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 963 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 964 | return ret; |
| 965 | } |
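Editor's illustration, not part of the kernel source: io_iopoll_check() above runs on behalf of io_uring_enter(2) when the ring was created with IORING_SETUP_IOPOLL and the caller asks for completions. A hypothetical, hedged wrapper:

/* Hedged sketch: with IORING_SETUP_IOPOLL, asking for min_complete events
 * makes the kernel actively poll the device (the loop above) rather than
 * wait for interrupt-driven completions. */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

static int submit_and_reap(int ring_fd, unsigned to_submit, unsigned min_complete)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}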
| 966 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 967 | static void kiocb_end_write(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 968 | { |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 969 | /* |
| 970 | * Tell lockdep we inherited freeze protection from submission |
| 971 | * thread. |
| 972 | */ |
| 973 | if (req->flags & REQ_F_ISREG) { |
| 974 | struct inode *inode = file_inode(req->file); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 975 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 976 | __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 977 | } |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 978 | file_end_write(req->file); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 979 | } |
| 980 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 981 | static void io_complete_rw_common(struct kiocb *kiocb, long res) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 982 | { |
| 983 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); |
| 984 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 985 | if (kiocb->ki_flags & IOCB_WRITE) |
| 986 | kiocb_end_write(req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 987 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 988 | if ((req->flags & REQ_F_LINK) && res != req->result) |
| 989 | req->flags |= REQ_F_FAIL_LINK; |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 990 | io_cqring_add_event(req->ctx, req->user_data, res); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 991 | } |
| 992 | |
| 993 | static void io_complete_rw(struct kiocb *kiocb, long res, long res2) |
| 994 | { |
| 995 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); |
| 996 | |
| 997 | io_complete_rw_common(kiocb, res); |
| 998 | io_put_req(req, NULL); |
| 999 | } |
| 1000 | |
| 1001 | static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res) |
| 1002 | { |
| 1003 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); |
| 1004 | |
| 1005 | io_complete_rw_common(kiocb, res); |
| 1006 | return io_put_req_find_next(req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1007 | } |
| 1008 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1009 | static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) |
| 1010 | { |
| 1011 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); |
| 1012 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1013 | if (kiocb->ki_flags & IOCB_WRITE) |
| 1014 | kiocb_end_write(req); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1015 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1016 | if ((req->flags & REQ_F_LINK) && res != req->result) |
| 1017 | req->flags |= REQ_F_FAIL_LINK; |
| 1018 | req->result = res; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1019 | if (res != -EAGAIN) |
| 1020 | req->flags |= REQ_F_IOPOLL_COMPLETED; |
| 1021 | } |
| 1022 | |
| 1023 | /* |
| 1024 | * After the iocb has been issued, it's safe to be found on the poll list. |
| 1025 | * Adding the kiocb to the list AFTER submission ensures that we don't |
| 1026 |  * find it from an io_iopoll_getevents() thread before the issuer is done |
| 1027 | * accessing the kiocb cookie. |
| 1028 | */ |
| 1029 | static void io_iopoll_req_issued(struct io_kiocb *req) |
| 1030 | { |
| 1031 | struct io_ring_ctx *ctx = req->ctx; |
| 1032 | |
| 1033 | /* |
| 1034 | * Track whether we have multiple files in our lists. This will impact |
| 1035 | * how we do polling eventually, not spinning if we're on potentially |
| 1036 | * different devices. |
| 1037 | */ |
| 1038 | if (list_empty(&ctx->poll_list)) { |
| 1039 | ctx->poll_multi_file = false; |
| 1040 | } else if (!ctx->poll_multi_file) { |
| 1041 | struct io_kiocb *list_req; |
| 1042 | |
| 1043 | list_req = list_first_entry(&ctx->poll_list, struct io_kiocb, |
| 1044 | list); |
| 1045 | if (list_req->rw.ki_filp != req->rw.ki_filp) |
| 1046 | ctx->poll_multi_file = true; |
| 1047 | } |
| 1048 | |
| 1049 | /* |
| 1050 | * For fast devices, IO may have already completed. If it has, add |
| 1051 | * it to the front so we find it first. |
| 1052 | */ |
| 1053 | if (req->flags & REQ_F_IOPOLL_COMPLETED) |
| 1054 | list_add(&req->list, &ctx->poll_list); |
| 1055 | else |
| 1056 | list_add_tail(&req->list, &ctx->poll_list); |
| 1057 | } |
| 1058 | |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 1059 | static void io_file_put(struct io_submit_state *state) |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 1060 | { |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 1061 | if (state->file) { |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 1062 | int diff = state->has_refs - state->used_refs; |
| 1063 | |
| 1064 | if (diff) |
| 1065 | fput_many(state->file, diff); |
| 1066 | state->file = NULL; |
| 1067 | } |
| 1068 | } |
| 1069 | |
| 1070 | /* |
| 1071 | * Get as many references to a file as we have IOs left in this submission, |
| 1072 | * assuming most submissions are for one file, or at least that each file |
| 1073 | * has more than one submission. |
| 1074 | */ |
| 1075 | static struct file *io_file_get(struct io_submit_state *state, int fd) |
| 1076 | { |
| 1077 | if (!state) |
| 1078 | return fget(fd); |
| 1079 | |
| 1080 | if (state->file) { |
| 1081 | if (state->fd == fd) { |
| 1082 | state->used_refs++; |
| 1083 | state->ios_left--; |
| 1084 | return state->file; |
| 1085 | } |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 1086 | io_file_put(state); |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 1087 | } |
| 1088 | state->file = fget_many(fd, state->ios_left); |
| 1089 | if (!state->file) |
| 1090 | return NULL; |
| 1091 | |
| 1092 | state->fd = fd; |
| 1093 | state->has_refs = state->ios_left; |
| 1094 | state->used_refs = 1; |
| 1095 | state->ios_left--; |
| 1096 | return state->file; |
| 1097 | } |
| 1098 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1099 | /* |
| 1100 | * If we tracked the file through the SCM inflight mechanism, we could support |
| 1101 | * any file. For now, just ensure that anything potentially problematic is done |
| 1102 | * inline. |
| 1103 | */ |
| 1104 | static bool io_file_supports_async(struct file *file) |
| 1105 | { |
| 1106 | umode_t mode = file_inode(file)->i_mode; |
| 1107 | |
| 1108 | if (S_ISBLK(mode) || S_ISCHR(mode)) |
| 1109 | return true; |
| 1110 | if (S_ISREG(mode) && file->f_op != &io_uring_fops) |
| 1111 | return true; |
| 1112 | |
| 1113 | return false; |
| 1114 | } |
| 1115 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 1116 | static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, |
Jens Axboe | 8358e3a | 2019-04-23 08:17:58 -0600 | [diff] [blame] | 1117 | bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1118 | { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 1119 | const struct io_uring_sqe *sqe = s->sqe; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1120 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1121 | struct kiocb *kiocb = &req->rw; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1122 | unsigned ioprio; |
| 1123 | int ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1124 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1125 | if (!req->file) |
| 1126 | return -EBADF; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1127 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1128 | if (S_ISREG(file_inode(req->file)->i_mode)) |
| 1129 | req->flags |= REQ_F_ISREG; |
| 1130 | |
| 1131 | /* |
| 1132 |  * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so |
| 1133 |  * we know to punt it to async context even if it was opened O_NONBLOCK. |
| 1134 | */ |
| 1135 | if (force_nonblock && !io_file_supports_async(req->file)) { |
| 1136 | req->flags |= REQ_F_MUST_PUNT; |
| 1137 | return -EAGAIN; |
| 1138 | } |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 1139 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1140 | kiocb->ki_pos = READ_ONCE(sqe->off); |
| 1141 | kiocb->ki_flags = iocb_flags(kiocb->ki_filp); |
| 1142 | kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); |
| 1143 | |
| 1144 | ioprio = READ_ONCE(sqe->ioprio); |
| 1145 | if (ioprio) { |
| 1146 | ret = ioprio_check_cap(ioprio); |
| 1147 | if (ret) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1148 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1149 | |
| 1150 | kiocb->ki_ioprio = ioprio; |
| 1151 | } else |
| 1152 | kiocb->ki_ioprio = get_current_ioprio(); |
| 1153 | |
| 1154 | ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); |
| 1155 | if (unlikely(ret)) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1156 | return ret; |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 1157 | |
| 1158 | /* don't allow async punt if RWF_NOWAIT was requested */ |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1159 | if ((kiocb->ki_flags & IOCB_NOWAIT) || |
| 1160 | (req->file->f_flags & O_NONBLOCK)) |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 1161 | req->flags |= REQ_F_NOWAIT; |
| 1162 | |
| 1163 | if (force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1164 | kiocb->ki_flags |= IOCB_NOWAIT; |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 1165 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1166 | if (ctx->flags & IORING_SETUP_IOPOLL) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1167 | if (!(kiocb->ki_flags & IOCB_DIRECT) || |
| 1168 | !kiocb->ki_filp->f_op->iopoll) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1169 | return -EOPNOTSUPP; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1170 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1171 | kiocb->ki_flags |= IOCB_HIPRI; |
| 1172 | kiocb->ki_complete = io_complete_rw_iopoll; |
| 1173 | } else { |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1174 | if (kiocb->ki_flags & IOCB_HIPRI) |
| 1175 | return -EINVAL; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1176 | kiocb->ki_complete = io_complete_rw; |
| 1177 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1178 | return 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1179 | } |
| 1180 | |
| 1181 | static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) |
| 1182 | { |
| 1183 | switch (ret) { |
| 1184 | case -EIOCBQUEUED: |
| 1185 | break; |
| 1186 | case -ERESTARTSYS: |
| 1187 | case -ERESTARTNOINTR: |
| 1188 | case -ERESTARTNOHAND: |
| 1189 | case -ERESTART_RESTARTBLOCK: |
| 1190 | /* |
| 1191 | * We can't just restart the syscall, since previously |
| 1192 | * submitted sqes may already be in progress. Just fail this |
| 1193 | * IO with EINTR. |
| 1194 | */ |
| 1195 | ret = -EINTR; |
| 1196 | /* fall through */ |
| 1197 | default: |
| 1198 | kiocb->ki_complete(kiocb, ret, 0); |
| 1199 | } |
| 1200 | } |
| 1201 | |
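| | /* |
| |  * Finish a read/write kiocb. When called from async context with a |
| |  * non-negative result on a regular (non-IOPOLL) kiocb, complete it via |
| |  * __io_complete_rw(), which can hand back a linked request in *nxt for |
| |  * the caller to issue next; otherwise fall back to io_rw_done(). |
| |  */ |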
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1202 | static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt, |
| 1203 | bool in_async) |
| 1204 | { |
| 1205 | if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw) |
| 1206 | *nxt = __io_complete_rw(kiocb, ret); |
| 1207 | else |
| 1208 | io_rw_done(kiocb, ret); |
| 1209 | } |
| 1210 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 1211 | static int io_import_fixed(struct io_ring_ctx *ctx, int rw, |
| 1212 | const struct io_uring_sqe *sqe, |
| 1213 | struct iov_iter *iter) |
| 1214 | { |
| 1215 | size_t len = READ_ONCE(sqe->len); |
| 1216 | struct io_mapped_ubuf *imu; |
| 1217 | unsigned index, buf_index; |
| 1218 | size_t offset; |
| 1219 | u64 buf_addr; |
| 1220 | |
| 1221 | /* attempt to use fixed buffers without having provided iovecs */ |
| 1222 | if (unlikely(!ctx->user_bufs)) |
| 1223 | return -EFAULT; |
| 1224 | |
| 1225 | buf_index = READ_ONCE(sqe->buf_index); |
| 1226 | if (unlikely(buf_index >= ctx->nr_user_bufs)) |
| 1227 | return -EFAULT; |
| 1228 | |
| 1229 | index = array_index_nospec(buf_index, ctx->nr_user_bufs); |
| 1230 | imu = &ctx->user_bufs[index]; |
| 1231 | buf_addr = READ_ONCE(sqe->addr); |
| 1232 | |
| 1233 | 	/* check that buf_addr + len doesn't overflow u64 */ |
| 1234 | if (buf_addr + len < buf_addr) |
| 1235 | return -EFAULT; |
| 1236 | /* not inside the mapped region */ |
| 1237 | if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len) |
| 1238 | return -EFAULT; |
| 1239 | |
| 1240 | /* |
| 1241 | 	 * May not be the start of the buffer; set the size appropriately |
| 1242 | 	 * and advance the iterator to the beginning. |
| 1243 | */ |
| 1244 | offset = buf_addr - imu->ubuf; |
| 1245 | iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len); |
Jens Axboe | bd11b3a | 2019-07-20 08:37:31 -0600 | [diff] [blame] | 1246 | |
| 1247 | if (offset) { |
| 1248 | /* |
| 1249 | * Don't use iov_iter_advance() here, as it's really slow for |
| 1250 | * using the latter parts of a big fixed buffer - it iterates |
| 1251 | * over each segment manually. We can cheat a bit here, because |
| 1252 | * we know that: |
| 1253 | * |
| 1254 | * 1) it's a BVEC iter, we set it up |
| 1255 | * 2) all bvecs are PAGE_SIZE in size, except potentially the |
| 1256 | * first and last bvec |
| 1257 | * |
| 1258 | * So just find our index, and adjust the iterator afterwards. |
| 1259 | 		 * If the offset is within the first bvec (or covers the whole |
| 1260 | 		 * first bvec), just use iov_iter_advance(). This makes it easier |
| 1261 | 		 * since we can just skip the first segment, which may not |
| 1262 | 		 * be PAGE_SIZE aligned. |
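| | 		 * |
| | 		 * Worked example (illustrative, assuming 4K pages and a page- |
| | 		 * aligned registered buffer): for offset = 10000 we skip the |
| | 		 * 4096-byte first bvec (offset becomes 5904), seg_skip = |
| | 		 * 1 + (5904 >> PAGE_SHIFT) = 2, and iov_offset = |
| | 		 * 5904 & ~PAGE_MASK = 1808 bytes into the third bvec. |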
| 1263 | */ |
| 1264 | const struct bio_vec *bvec = imu->bvec; |
| 1265 | |
| 1266 | if (offset <= bvec->bv_len) { |
| 1267 | iov_iter_advance(iter, offset); |
| 1268 | } else { |
| 1269 | unsigned long seg_skip; |
| 1270 | |
| 1271 | /* skip first vec */ |
| 1272 | offset -= bvec->bv_len; |
| 1273 | seg_skip = 1 + (offset >> PAGE_SHIFT); |
| 1274 | |
| 1275 | iter->bvec = bvec + seg_skip; |
| 1276 | iter->nr_segs -= seg_skip; |
Aleix Roca Nonell | 99c79f6 | 2019-08-15 14:03:22 +0200 | [diff] [blame] | 1277 | iter->count -= bvec->bv_len + offset; |
Jens Axboe | bd11b3a | 2019-07-20 08:37:31 -0600 | [diff] [blame] | 1278 | iter->iov_offset = offset & ~PAGE_MASK; |
Jens Axboe | bd11b3a | 2019-07-20 08:37:31 -0600 | [diff] [blame] | 1279 | } |
| 1280 | } |
| 1281 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 1282 | return 0; |
| 1283 | } |
| 1284 | |
Jens Axboe | 87e5e6d | 2019-05-14 16:02:22 -0600 | [diff] [blame] | 1285 | static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, |
| 1286 | const struct sqe_submit *s, struct iovec **iovec, |
| 1287 | struct iov_iter *iter) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1288 | { |
| 1289 | const struct io_uring_sqe *sqe = s->sqe; |
| 1290 | void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
| 1291 | size_t sqe_len = READ_ONCE(sqe->len); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 1292 | u8 opcode; |
| 1293 | |
| 1294 | /* |
| 1295 | * We're reading ->opcode for the second time, but the first read |
| 1296 | * doesn't care whether it's _FIXED or not, so it doesn't matter |
| 1297 | * whether ->opcode changes concurrently. The first read does care |
| 1298 | * about whether it is a READ or a WRITE, so we don't trust this read |
| 1299 | * for that purpose and instead let the caller pass in the read/write |
| 1300 | * flag. |
| 1301 | */ |
| 1302 | opcode = READ_ONCE(sqe->opcode); |
| 1303 | if (opcode == IORING_OP_READ_FIXED || |
| 1304 | opcode == IORING_OP_WRITE_FIXED) { |
Jens Axboe | 87e5e6d | 2019-05-14 16:02:22 -0600 | [diff] [blame] | 1305 | ssize_t ret = io_import_fixed(ctx, rw, sqe, iter); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 1306 | *iovec = NULL; |
| 1307 | return ret; |
| 1308 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1309 | |
| 1310 | if (!s->has_user) |
| 1311 | return -EFAULT; |
| 1312 | |
| 1313 | #ifdef CONFIG_COMPAT |
| 1314 | if (ctx->compat) |
| 1315 | return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV, |
| 1316 | iovec, iter); |
| 1317 | #endif |
| 1318 | |
| 1319 | return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter); |
| 1320 | } |
| 1321 | |
Jens Axboe | 6d5d5ac | 2019-09-11 10:16:13 -0600 | [diff] [blame] | 1322 | static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb) |
| 1323 | { |
| 1324 | if (al->file == kiocb->ki_filp) { |
| 1325 | off_t start, end; |
| 1326 | |
| 1327 | /* |
| 1328 | * Allow merging if we're anywhere in the range of the same |
| 1329 | * page. Generally this happens for sub-page reads or writes, |
| 1330 | 		 * and it's beneficial to let the first worker bring the page |
| 1331 | 		 * in so that the piggybacked work can then operate on the |
| 1332 | 		 * cached page. |
| 1333 | */ |
| 1334 | start = al->io_start & PAGE_MASK; |
| 1335 | end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK; |
| 1336 | if (kiocb->ki_pos >= start && kiocb->ki_pos <= end) |
| 1337 | return true; |
| 1338 | } |
| 1339 | |
| 1340 | al->file = NULL; |
| 1341 | return false; |
| 1342 | } |
| 1343 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1344 | /* |
| 1345 | * Make a note of the last file/offset/direction we punted to async |
| 1346 |  * context. We'll use this information to see if we can piggyback a |
| 1347 |  * sequential request onto the previous one, if the latter still hasn't |
| 1348 |  * been completed by the async worker. |
| 1349 | */ |
| 1350 | static void io_async_list_note(int rw, struct io_kiocb *req, size_t len) |
| 1351 | { |
| 1352 | struct async_list *async_list = &req->ctx->pending_async[rw]; |
| 1353 | struct kiocb *kiocb = &req->rw; |
| 1354 | struct file *filp = kiocb->ki_filp; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1355 | |
Jens Axboe | 6d5d5ac | 2019-09-11 10:16:13 -0600 | [diff] [blame] | 1356 | if (io_should_merge(async_list, kiocb)) { |
Zhengyuan Liu | 9310a7ba | 2019-07-22 10:23:27 +0800 | [diff] [blame] | 1357 | unsigned long max_bytes; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1358 | |
| 1359 | /* Use 8x RA size as a decent limiter for both reads/writes */ |
Zhengyuan Liu | 9310a7ba | 2019-07-22 10:23:27 +0800 | [diff] [blame] | 1360 | max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3); |
| 1361 | if (!max_bytes) |
| 1362 | max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1363 | |
Zhengyuan Liu | 9310a7ba | 2019-07-22 10:23:27 +0800 | [diff] [blame] | 1364 | 		/* If the max length is exceeded, reset the state */ |
| 1365 | if (async_list->io_len + len <= max_bytes) { |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1366 | req->flags |= REQ_F_SEQ_PREV; |
Zhengyuan Liu | 9310a7ba | 2019-07-22 10:23:27 +0800 | [diff] [blame] | 1367 | async_list->io_len += len; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1368 | } else { |
Jens Axboe | 6d5d5ac | 2019-09-11 10:16:13 -0600 | [diff] [blame] | 1369 | async_list->file = NULL; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1370 | } |
| 1371 | } |
| 1372 | |
| 1373 | /* New file? Reset state. */ |
| 1374 | if (async_list->file != filp) { |
Jens Axboe | 6d5d5ac | 2019-09-11 10:16:13 -0600 | [diff] [blame] | 1375 | async_list->io_start = kiocb->ki_pos; |
| 1376 | async_list->io_len = len; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1377 | async_list->file = filp; |
| 1378 | } |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1379 | } |
| 1380 | |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 1381 | /* |
| 1382 | * For files that don't have ->read_iter() and ->write_iter(), handle them |
| 1383 | * by looping over ->read() or ->write() manually. |
| 1384 | */ |
| 1385 | static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, |
| 1386 | struct iov_iter *iter) |
| 1387 | { |
| 1388 | ssize_t ret = 0; |
| 1389 | |
| 1390 | /* |
| 1391 | * Don't support polled IO through this interface, and we can't |
| 1392 | * support non-blocking either. For the latter, this just causes |
| 1393 | * the kiocb to be handled from an async context. |
| 1394 | */ |
| 1395 | if (kiocb->ki_flags & IOCB_HIPRI) |
| 1396 | return -EOPNOTSUPP; |
| 1397 | if (kiocb->ki_flags & IOCB_NOWAIT) |
| 1398 | return -EAGAIN; |
| 1399 | |
| 1400 | while (iov_iter_count(iter)) { |
| 1401 | struct iovec iovec = iov_iter_iovec(iter); |
| 1402 | ssize_t nr; |
| 1403 | |
| 1404 | if (rw == READ) { |
| 1405 | nr = file->f_op->read(file, iovec.iov_base, |
| 1406 | iovec.iov_len, &kiocb->ki_pos); |
| 1407 | } else { |
| 1408 | nr = file->f_op->write(file, iovec.iov_base, |
| 1409 | iovec.iov_len, &kiocb->ki_pos); |
| 1410 | } |
| 1411 | |
| 1412 | if (nr < 0) { |
| 1413 | if (!ret) |
| 1414 | ret = nr; |
| 1415 | break; |
| 1416 | } |
| 1417 | ret += nr; |
| 1418 | if (nr != iovec.iov_len) |
| 1419 | break; |
| 1420 | iov_iter_advance(iter, nr); |
| 1421 | } |
| 1422 | |
| 1423 | return ret; |
| 1424 | } |
| 1425 | |
Jens Axboe | e0c5c57 | 2019-03-12 10:18:47 -0600 | [diff] [blame] | 1426 | static int io_read(struct io_kiocb *req, const struct sqe_submit *s, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1427 | struct io_kiocb **nxt, bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1428 | { |
| 1429 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
| 1430 | struct kiocb *kiocb = &req->rw; |
| 1431 | struct iov_iter iter; |
| 1432 | struct file *file; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1433 | size_t iov_count; |
Jens Axboe | 9d93a3f | 2019-05-15 13:53:07 -0600 | [diff] [blame] | 1434 | ssize_t read_size, ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1435 | |
Jens Axboe | 8358e3a | 2019-04-23 08:17:58 -0600 | [diff] [blame] | 1436 | ret = io_prep_rw(req, s, force_nonblock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1437 | if (ret) |
| 1438 | return ret; |
| 1439 | file = kiocb->ki_filp; |
| 1440 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1441 | if (unlikely(!(file->f_mode & FMODE_READ))) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1442 | return -EBADF; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1443 | |
| 1444 | ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter); |
Jens Axboe | 87e5e6d | 2019-05-14 16:02:22 -0600 | [diff] [blame] | 1445 | if (ret < 0) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1446 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1447 | |
Jens Axboe | 9d93a3f | 2019-05-15 13:53:07 -0600 | [diff] [blame] | 1448 | read_size = ret; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1449 | if (req->flags & REQ_F_LINK) |
| 1450 | req->result = read_size; |
| 1451 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1452 | iov_count = iov_iter_count(&iter); |
| 1453 | ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1454 | if (!ret) { |
| 1455 | ssize_t ret2; |
| 1456 | |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 1457 | if (file->f_op->read_iter) |
| 1458 | ret2 = call_read_iter(file, kiocb, &iter); |
| 1459 | else |
| 1460 | ret2 = loop_rw_iter(READ, file, kiocb, &iter); |
| 1461 | |
Jens Axboe | 9d93a3f | 2019-05-15 13:53:07 -0600 | [diff] [blame] | 1462 | /* |
| 1463 | * In case of a short read, punt to async. This can happen |
| 1464 | * if we have data partially cached. Alternatively we can |
| 1465 | * return the short read, in which case the application will |
| 1466 | * need to issue another SQE and wait for it. That SQE will |
| 1467 | * need async punt anyway, so it's more efficient to do it |
| 1468 | * here. |
| 1469 | */ |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1470 | if (force_nonblock && !(req->flags & REQ_F_NOWAIT) && |
| 1471 | (req->flags & REQ_F_ISREG) && |
| 1472 | ret2 > 0 && ret2 < read_size) |
Jens Axboe | 9d93a3f | 2019-05-15 13:53:07 -0600 | [diff] [blame] | 1473 | ret2 = -EAGAIN; |
| 1474 | /* Catch -EAGAIN return for forced non-blocking submission */ |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1475 | if (!force_nonblock || ret2 != -EAGAIN) { |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1476 | kiocb_done(kiocb, ret2, nxt, s->needs_lock); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1477 | } else { |
| 1478 | /* |
| 1479 | * If ->needs_lock is true, we're already in async |
| 1480 | * context. |
| 1481 | */ |
| 1482 | if (!s->needs_lock) |
| 1483 | io_async_list_note(READ, req, iov_count); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1484 | ret = -EAGAIN; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1485 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1486 | } |
| 1487 | kfree(iovec); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1488 | return ret; |
| 1489 | } |
| 1490 | |
Jens Axboe | e0c5c57 | 2019-03-12 10:18:47 -0600 | [diff] [blame] | 1491 | static int io_write(struct io_kiocb *req, const struct sqe_submit *s, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1492 | struct io_kiocb **nxt, bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1493 | { |
| 1494 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
| 1495 | struct kiocb *kiocb = &req->rw; |
| 1496 | struct iov_iter iter; |
| 1497 | struct file *file; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1498 | size_t iov_count; |
Jens Axboe | 87e5e6d | 2019-05-14 16:02:22 -0600 | [diff] [blame] | 1499 | ssize_t ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1500 | |
Jens Axboe | 8358e3a | 2019-04-23 08:17:58 -0600 | [diff] [blame] | 1501 | ret = io_prep_rw(req, s, force_nonblock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1502 | if (ret) |
| 1503 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1504 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1505 | file = kiocb->ki_filp; |
| 1506 | if (unlikely(!(file->f_mode & FMODE_WRITE))) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1507 | return -EBADF; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1508 | |
| 1509 | ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter); |
Jens Axboe | 87e5e6d | 2019-05-14 16:02:22 -0600 | [diff] [blame] | 1510 | if (ret < 0) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1511 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1512 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1513 | if (req->flags & REQ_F_LINK) |
| 1514 | req->result = ret; |
| 1515 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1516 | iov_count = iov_iter_count(&iter); |
| 1517 | |
| 1518 | ret = -EAGAIN; |
| 1519 | if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) { |
| 1520 | /* If ->needs_lock is true, we're already in async context. */ |
| 1521 | if (!s->needs_lock) |
| 1522 | io_async_list_note(WRITE, req, iov_count); |
| 1523 | goto out_free; |
| 1524 | } |
| 1525 | |
| 1526 | ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1527 | if (!ret) { |
Roman Penyaev | 9bf7933 | 2019-03-25 20:09:24 +0100 | [diff] [blame] | 1528 | ssize_t ret2; |
| 1529 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1530 | /* |
| 1531 | * Open-code file_start_write here to grab freeze protection, |
| 1532 | * which will be released by another thread in |
| 1533 | * io_complete_rw(). Fool lockdep by telling it the lock got |
| 1534 | * released so that it doesn't complain about the held lock when |
| 1535 | * we return to userspace. |
| 1536 | */ |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1537 | if (req->flags & REQ_F_ISREG) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1538 | __sb_start_write(file_inode(file)->i_sb, |
| 1539 | SB_FREEZE_WRITE, true); |
| 1540 | __sb_writers_release(file_inode(file)->i_sb, |
| 1541 | SB_FREEZE_WRITE); |
| 1542 | } |
| 1543 | kiocb->ki_flags |= IOCB_WRITE; |
Roman Penyaev | 9bf7933 | 2019-03-25 20:09:24 +0100 | [diff] [blame] | 1544 | |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 1545 | if (file->f_op->write_iter) |
| 1546 | ret2 = call_write_iter(file, kiocb, &iter); |
| 1547 | else |
| 1548 | ret2 = loop_rw_iter(WRITE, file, kiocb, &iter); |
Roman Penyaev | 9bf7933 | 2019-03-25 20:09:24 +0100 | [diff] [blame] | 1549 | if (!force_nonblock || ret2 != -EAGAIN) { |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1550 | kiocb_done(kiocb, ret2, nxt, s->needs_lock); |
Roman Penyaev | 9bf7933 | 2019-03-25 20:09:24 +0100 | [diff] [blame] | 1551 | } else { |
| 1552 | /* |
| 1553 | * If ->needs_lock is true, we're already in async |
| 1554 | * context. |
| 1555 | */ |
| 1556 | if (!s->needs_lock) |
| 1557 | io_async_list_note(WRITE, req, iov_count); |
| 1558 | ret = -EAGAIN; |
| 1559 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1560 | } |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1561 | out_free: |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1562 | kfree(iovec); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1563 | return ret; |
| 1564 | } |
| 1565 | |
| 1566 | /* |
| 1567 | * IORING_OP_NOP just posts a completion event, nothing else. |
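| |  * Illustrative use: an SQE with opcode IORING_OP_NOP and user_data set |
| |  * to some tag posts a CQE carrying that tag with res == 0. NOP is |
| |  * rejected with -EINVAL on IORING_SETUP_IOPOLL rings. |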
| 1568 | */ |
| 1569 | static int io_nop(struct io_kiocb *req, u64 user_data) |
| 1570 | { |
| 1571 | struct io_ring_ctx *ctx = req->ctx; |
| 1572 | long err = 0; |
| 1573 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1574 | if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) |
| 1575 | return -EINVAL; |
| 1576 | |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 1577 | io_cqring_add_event(ctx, user_data, err); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1578 | io_put_req(req, NULL); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1579 | return 0; |
| 1580 | } |
| 1581 | |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 1582 | static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 1583 | { |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 1584 | struct io_ring_ctx *ctx = req->ctx; |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 1585 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1586 | if (!req->file) |
| 1587 | return -EBADF; |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 1588 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 1589 | if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1590 | return -EINVAL; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 1591 | if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 1592 | return -EINVAL; |
| 1593 | |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 1594 | return 0; |
| 1595 | } |
| 1596 | |
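| | /* |
| |  * IORING_OP_FSYNC: sqe->off and sqe->len bound the range to sync (0/0 |
| |  * means the whole file), and IORING_FSYNC_DATASYNC in sqe->fsync_flags |
| |  * requests fdatasync semantics. |
| |  */ |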
| 1597 | static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1598 | struct io_kiocb **nxt, bool force_nonblock) |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 1599 | { |
| 1600 | loff_t sqe_off = READ_ONCE(sqe->off); |
| 1601 | loff_t sqe_len = READ_ONCE(sqe->len); |
| 1602 | loff_t end = sqe_off + sqe_len; |
| 1603 | unsigned fsync_flags; |
| 1604 | int ret; |
| 1605 | |
| 1606 | fsync_flags = READ_ONCE(sqe->fsync_flags); |
| 1607 | if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC)) |
| 1608 | return -EINVAL; |
| 1609 | |
| 1610 | ret = io_prep_fsync(req, sqe); |
| 1611 | if (ret) |
| 1612 | return ret; |
| 1613 | |
| 1614 | /* fsync always requires a blocking context */ |
| 1615 | if (force_nonblock) |
| 1616 | return -EAGAIN; |
| 1617 | |
| 1618 | ret = vfs_fsync_range(req->rw.ki_filp, sqe_off, |
| 1619 | end > 0 ? end : LLONG_MAX, |
| 1620 | fsync_flags & IORING_FSYNC_DATASYNC); |
| 1621 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1622 | if (ret < 0 && (req->flags & REQ_F_LINK)) |
| 1623 | req->flags |= REQ_F_FAIL_LINK; |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 1624 | io_cqring_add_event(req->ctx, sqe->user_data, ret); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1625 | io_put_req(req, nxt); |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 1626 | return 0; |
| 1627 | } |
| 1628 | |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 1629 | static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 1630 | { |
| 1631 | struct io_ring_ctx *ctx = req->ctx; |
| 1632 | int ret = 0; |
| 1633 | |
| 1634 | if (!req->file) |
| 1635 | return -EBADF; |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 1636 | |
| 1637 | if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) |
| 1638 | return -EINVAL; |
| 1639 | if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) |
| 1640 | return -EINVAL; |
| 1641 | |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 1642 | return ret; |
| 1643 | } |
| 1644 | |
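| | /* |
| |  * IORING_OP_SYNC_FILE_RANGE: mirrors the sync_file_range(2) syscall, |
| |  * taking the offset from sqe->off, the length from sqe->len and the |
| |  * flags from sqe->sync_range_flags. |
| |  */ |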
| 1645 | static int io_sync_file_range(struct io_kiocb *req, |
| 1646 | const struct io_uring_sqe *sqe, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1647 | struct io_kiocb **nxt, |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 1648 | bool force_nonblock) |
| 1649 | { |
| 1650 | loff_t sqe_off; |
| 1651 | loff_t sqe_len; |
| 1652 | unsigned flags; |
| 1653 | int ret; |
| 1654 | |
| 1655 | ret = io_prep_sfr(req, sqe); |
| 1656 | if (ret) |
| 1657 | return ret; |
| 1658 | |
| 1659 | /* sync_file_range always requires a blocking context */ |
| 1660 | if (force_nonblock) |
| 1661 | return -EAGAIN; |
| 1662 | |
| 1663 | sqe_off = READ_ONCE(sqe->off); |
| 1664 | sqe_len = READ_ONCE(sqe->len); |
| 1665 | flags = READ_ONCE(sqe->sync_range_flags); |
| 1666 | |
| 1667 | ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags); |
| 1668 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1669 | if (ret < 0 && (req->flags & REQ_F_LINK)) |
| 1670 | req->flags |= REQ_F_FAIL_LINK; |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 1671 | io_cqring_add_event(req->ctx, sqe->user_data, ret); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1672 | io_put_req(req, nxt); |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 1673 | return 0; |
| 1674 | } |
| 1675 | |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 1676 | #if defined(CONFIG_NET) |
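| | /* |
| |  * Common helper for IORING_OP_SENDMSG/IORING_OP_RECVMSG: sqe->addr |
| |  * points at a struct user_msghdr and sqe->msg_flags carries the MSG_* |
| |  * flags; fn is __sys_sendmsg_sock or __sys_recvmsg_sock. |
| |  */ |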
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 1677 | static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1678 | struct io_kiocb **nxt, bool force_nonblock, |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 1679 | long (*fn)(struct socket *, struct user_msghdr __user *, |
| 1680 | unsigned int)) |
| 1681 | { |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 1682 | struct socket *sock; |
| 1683 | int ret; |
| 1684 | |
| 1685 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 1686 | return -EINVAL; |
| 1687 | |
| 1688 | sock = sock_from_file(req->file, &ret); |
| 1689 | if (sock) { |
| 1690 | struct user_msghdr __user *msg; |
| 1691 | unsigned flags; |
| 1692 | |
| 1693 | flags = READ_ONCE(sqe->msg_flags); |
| 1694 | if (flags & MSG_DONTWAIT) |
| 1695 | req->flags |= REQ_F_NOWAIT; |
| 1696 | else if (force_nonblock) |
| 1697 | flags |= MSG_DONTWAIT; |
| 1698 | |
| 1699 | msg = (struct user_msghdr __user *) (unsigned long) |
| 1700 | READ_ONCE(sqe->addr); |
| 1701 | |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 1702 | ret = fn(sock, msg, flags); |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 1703 | if (force_nonblock && ret == -EAGAIN) |
| 1704 | return ret; |
| 1705 | } |
| 1706 | |
| 1707 | io_cqring_add_event(req->ctx, sqe->user_data, ret); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1708 | io_put_req(req, nxt); |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 1709 | return 0; |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 1710 | } |
| 1711 | #endif |
| 1712 | |
| 1713 | static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1714 | struct io_kiocb **nxt, bool force_nonblock) |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 1715 | { |
| 1716 | #if defined(CONFIG_NET) |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1717 | return io_send_recvmsg(req, sqe, nxt, force_nonblock, |
| 1718 | __sys_sendmsg_sock); |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 1719 | #else |
| 1720 | return -EOPNOTSUPP; |
| 1721 | #endif |
| 1722 | } |
| 1723 | |
| 1724 | static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1725 | struct io_kiocb **nxt, bool force_nonblock) |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 1726 | { |
| 1727 | #if defined(CONFIG_NET) |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1728 | return io_send_recvmsg(req, sqe, nxt, force_nonblock, |
| 1729 | __sys_recvmsg_sock); |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 1730 | #else |
| 1731 | return -EOPNOTSUPP; |
| 1732 | #endif |
| 1733 | } |
| 1734 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1735 | static void io_poll_remove_one(struct io_kiocb *req) |
| 1736 | { |
| 1737 | struct io_poll_iocb *poll = &req->poll; |
| 1738 | |
| 1739 | spin_lock(&poll->head->lock); |
| 1740 | WRITE_ONCE(poll->canceled, true); |
| 1741 | if (!list_empty(&poll->wait.entry)) { |
| 1742 | list_del_init(&poll->wait.entry); |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 1743 | io_queue_async_work(req->ctx, req); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1744 | } |
| 1745 | spin_unlock(&poll->head->lock); |
| 1746 | |
| 1747 | list_del_init(&req->list); |
| 1748 | } |
| 1749 | |
| 1750 | static void io_poll_remove_all(struct io_ring_ctx *ctx) |
| 1751 | { |
| 1752 | struct io_kiocb *req; |
| 1753 | |
| 1754 | spin_lock_irq(&ctx->completion_lock); |
| 1755 | while (!list_empty(&ctx->cancel_list)) { |
| 1756 | req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list); |
| 1757 | io_poll_remove_one(req); |
| 1758 | } |
| 1759 | spin_unlock_irq(&ctx->completion_lock); |
| 1760 | } |
| 1761 | |
| 1762 | /* |
| 1763 | * Find a running poll command that matches one specified in sqe->addr, |
| 1764 | * and remove it if found. |
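| |  * sqe->addr holds the user_data that the original IORING_OP_POLL_ADD |
| |  * request was submitted with. |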
| 1765 | */ |
| 1766 | static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 1767 | { |
| 1768 | struct io_ring_ctx *ctx = req->ctx; |
| 1769 | struct io_kiocb *poll_req, *next; |
| 1770 | int ret = -ENOENT; |
| 1771 | |
| 1772 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 1773 | return -EINVAL; |
| 1774 | if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || |
| 1775 | sqe->poll_events) |
| 1776 | return -EINVAL; |
| 1777 | |
| 1778 | spin_lock_irq(&ctx->completion_lock); |
| 1779 | list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) { |
| 1780 | if (READ_ONCE(sqe->addr) == poll_req->user_data) { |
| 1781 | io_poll_remove_one(poll_req); |
| 1782 | ret = 0; |
| 1783 | break; |
| 1784 | } |
| 1785 | } |
| 1786 | spin_unlock_irq(&ctx->completion_lock); |
| 1787 | |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 1788 | io_cqring_add_event(req->ctx, sqe->user_data, ret); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1789 | io_put_req(req, NULL); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1790 | return 0; |
| 1791 | } |
| 1792 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1793 | static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req, |
| 1794 | __poll_t mask) |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1795 | { |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1796 | req->poll.done = true; |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 1797 | io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask)); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1798 | io_commit_cqring(ctx); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1799 | } |
| 1800 | |
| 1801 | static void io_poll_complete_work(struct work_struct *work) |
| 1802 | { |
| 1803 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 1804 | struct io_poll_iocb *poll = &req->poll; |
| 1805 | struct poll_table_struct pt = { ._key = poll->events }; |
| 1806 | struct io_ring_ctx *ctx = req->ctx; |
| 1807 | __poll_t mask = 0; |
| 1808 | |
| 1809 | if (!READ_ONCE(poll->canceled)) |
| 1810 | mask = vfs_poll(poll->file, &pt) & poll->events; |
| 1811 | |
| 1812 | /* |
| 1813 | * Note that ->ki_cancel callers also delete iocb from active_reqs after |
| 1814 | * calling ->ki_cancel. We need the ctx_lock roundtrip here to |
| 1815 | * synchronize with them. In the cancellation case the list_del_init |
| 1816 | 	 * itself is not actually needed, but it's harmless, so we keep it to |
| 1817 | 	 * avoid further branches in the fast path. |
| 1818 | */ |
| 1819 | spin_lock_irq(&ctx->completion_lock); |
| 1820 | if (!mask && !READ_ONCE(poll->canceled)) { |
| 1821 | add_wait_queue(poll->head, &poll->wait); |
| 1822 | spin_unlock_irq(&ctx->completion_lock); |
| 1823 | return; |
| 1824 | } |
| 1825 | list_del_init(&req->list); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1826 | io_poll_complete(ctx, req, mask); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1827 | spin_unlock_irq(&ctx->completion_lock); |
| 1828 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1829 | io_cqring_ev_posted(ctx); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1830 | io_put_req(req, NULL); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1831 | } |
| 1832 | |
| 1833 | static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |
| 1834 | void *key) |
| 1835 | { |
| 1836 | struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb, |
| 1837 | wait); |
| 1838 | struct io_kiocb *req = container_of(poll, struct io_kiocb, poll); |
| 1839 | struct io_ring_ctx *ctx = req->ctx; |
| 1840 | __poll_t mask = key_to_poll(key); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1841 | unsigned long flags; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1842 | |
| 1843 | 	/* for instances that support it, check for an event match first: */ |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1844 | if (mask && !(mask & poll->events)) |
| 1845 | return 0; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1846 | |
| 1847 | list_del_init(&poll->wait.entry); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1848 | |
| 1849 | if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) { |
| 1850 | list_del(&req->list); |
| 1851 | io_poll_complete(ctx, req, mask); |
| 1852 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 1853 | |
| 1854 | io_cqring_ev_posted(ctx); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1855 | io_put_req(req, NULL); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1856 | } else { |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 1857 | io_queue_async_work(ctx, req); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1858 | } |
| 1859 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1860 | return 1; |
| 1861 | } |
| 1862 | |
| 1863 | struct io_poll_table { |
| 1864 | struct poll_table_struct pt; |
| 1865 | struct io_kiocb *req; |
| 1866 | int error; |
| 1867 | }; |
| 1868 | |
| 1869 | static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, |
| 1870 | struct poll_table_struct *p) |
| 1871 | { |
| 1872 | struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); |
| 1873 | |
| 1874 | if (unlikely(pt->req->poll.head)) { |
| 1875 | pt->error = -EINVAL; |
| 1876 | return; |
| 1877 | } |
| 1878 | |
| 1879 | pt->error = 0; |
| 1880 | pt->req->poll.head = head; |
| 1881 | add_wait_queue(head, &pt->req->poll.wait); |
| 1882 | } |
| 1883 | |
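| | /* |
| |  * IORING_OP_POLL_ADD: arm a one-shot poll on req->file. sqe->poll_events |
| |  * carries the EPOLL* event mask (EPOLLERR/EPOLLHUP are always added); |
| |  * the ready mask is posted as the CQE result when the poll fires. |
| |  */ |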
| 1884 | static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 1885 | { |
| 1886 | struct io_poll_iocb *poll = &req->poll; |
| 1887 | struct io_ring_ctx *ctx = req->ctx; |
| 1888 | struct io_poll_table ipt; |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1889 | bool cancel = false; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1890 | __poll_t mask; |
| 1891 | u16 events; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1892 | |
| 1893 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 1894 | return -EINVAL; |
| 1895 | if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) |
| 1896 | return -EINVAL; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1897 | if (!poll->file) |
| 1898 | return -EBADF; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1899 | |
Jens Axboe | 6cc47d1 | 2019-09-18 11:18:23 -0600 | [diff] [blame] | 1900 | req->submit.sqe = NULL; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1901 | INIT_WORK(&req->work, io_poll_complete_work); |
| 1902 | events = READ_ONCE(sqe->poll_events); |
| 1903 | poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; |
| 1904 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1905 | poll->head = NULL; |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1906 | poll->done = false; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1907 | poll->canceled = false; |
| 1908 | |
| 1909 | ipt.pt._qproc = io_poll_queue_proc; |
| 1910 | ipt.pt._key = poll->events; |
| 1911 | ipt.req = req; |
| 1912 | ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ |
| 1913 | |
| 1914 | 	/* initialize the list so that we can do list_empty checks */ |
| 1915 | INIT_LIST_HEAD(&poll->wait.entry); |
| 1916 | init_waitqueue_func_entry(&poll->wait, io_poll_wake); |
| 1917 | |
Jens Axboe | 3670324 | 2019-07-25 10:20:18 -0600 | [diff] [blame] | 1918 | INIT_LIST_HEAD(&req->list); |
| 1919 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1920 | mask = vfs_poll(poll->file, &ipt.pt) & poll->events; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1921 | |
| 1922 | spin_lock_irq(&ctx->completion_lock); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1923 | if (likely(poll->head)) { |
| 1924 | spin_lock(&poll->head->lock); |
| 1925 | if (unlikely(list_empty(&poll->wait.entry))) { |
| 1926 | if (ipt.error) |
| 1927 | cancel = true; |
| 1928 | ipt.error = 0; |
| 1929 | mask = 0; |
| 1930 | } |
| 1931 | if (mask || ipt.error) |
| 1932 | list_del_init(&poll->wait.entry); |
| 1933 | else if (cancel) |
| 1934 | WRITE_ONCE(poll->canceled, true); |
| 1935 | else if (!poll->done) /* actually waiting for an event */ |
| 1936 | list_add_tail(&req->list, &ctx->cancel_list); |
| 1937 | spin_unlock(&poll->head->lock); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1938 | } |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1939 | 	if (mask) { /* no async needed, we completed it inline */ |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1940 | ipt.error = 0; |
| 1941 | io_poll_complete(ctx, req, mask); |
| 1942 | } |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1943 | spin_unlock_irq(&ctx->completion_lock); |
| 1944 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1945 | if (mask) { |
| 1946 | io_cqring_ev_posted(ctx); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1947 | io_put_req(req, NULL); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1948 | } |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1949 | return ipt.error; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 1950 | } |
| 1951 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1952 | static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) |
| 1953 | { |
| 1954 | struct io_ring_ctx *ctx; |
zhangyi (F) | ef03681 | 2019-10-23 15:10:08 +0800 | [diff] [blame] | 1955 | struct io_kiocb *req, *prev; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1956 | unsigned long flags; |
| 1957 | |
| 1958 | req = container_of(timer, struct io_kiocb, timeout.timer); |
| 1959 | ctx = req->ctx; |
| 1960 | atomic_inc(&ctx->cq_timeouts); |
| 1961 | |
| 1962 | spin_lock_irqsave(&ctx->completion_lock, flags); |
zhangyi (F) | ef03681 | 2019-10-23 15:10:08 +0800 | [diff] [blame] | 1963 | /* |
| 1964 | 	 * Adjust the sequence of the reqs queued before the current one, |
| 1965 | 	 * because this req will consume a slot in the cq_ring and the |
| 1966 | 	 * cq_tail pointer will be advanced; otherwise other timeout reqs |
| 1967 | 	 * may complete early without waiting for enough wait_nr events. |
| 1968 | */ |
| 1969 | prev = req; |
| 1970 | list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list) |
| 1971 | prev->sequence++; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1972 | list_del(&req->list); |
| 1973 | |
| 1974 | io_cqring_fill_event(ctx, req->user_data, -ETIME); |
| 1975 | io_commit_cqring(ctx); |
| 1976 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 1977 | |
| 1978 | io_cqring_ev_posted(ctx); |
| 1979 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1980 | io_put_req(req, NULL); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1981 | return HRTIMER_NORESTART; |
| 1982 | } |
| 1983 | |
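| | /* |
| |  * IORING_OP_TIMEOUT: sqe->addr points at a timespec (read via |
| |  * get_timespec64()) and sqe->len must be 1. sqe->off is the number of |
| |  * completions to wait for before the timeout fires; 0 is treated as 1. |
| |  * The request completes with -ETIME if the timer expires first. |
| |  */ |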
| 1984 | static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 1985 | { |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 1986 | unsigned count; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1987 | struct io_ring_ctx *ctx = req->ctx; |
| 1988 | struct list_head *entry; |
Arnd Bergmann | bdf2007 | 2019-10-01 09:53:29 -0600 | [diff] [blame] | 1989 | struct timespec64 ts; |
zhangyi (F) | a1f58ba | 2019-10-23 15:10:09 +0800 | [diff] [blame] | 1990 | unsigned span = 0; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1991 | |
| 1992 | if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) |
| 1993 | return -EINVAL; |
| 1994 | if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags || |
| 1995 | sqe->len != 1) |
| 1996 | return -EINVAL; |
Arnd Bergmann | bdf2007 | 2019-10-01 09:53:29 -0600 | [diff] [blame] | 1997 | |
| 1998 | if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr))) |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1999 | return -EFAULT; |
| 2000 | |
| 2001 | /* |
| 2002 | 	 * sqe->off holds how many events need to occur for this |
| 2003 | * timeout event to be satisfied. |
| 2004 | */ |
| 2005 | count = READ_ONCE(sqe->off); |
| 2006 | if (!count) |
| 2007 | count = 1; |
| 2008 | |
| 2009 | req->sequence = ctx->cached_sq_head + count - 1; |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 2010 | 	/* reuse submit.sequence to store the count */ |
| 2011 | req->submit.sequence = count; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2012 | req->flags |= REQ_F_TIMEOUT; |
| 2013 | |
| 2014 | /* |
| 2015 | * Insertion sort, ensuring the first entry in the list is always |
| 2016 | * the one we need first. |
| 2017 | */ |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2018 | spin_lock_irq(&ctx->completion_lock); |
| 2019 | list_for_each_prev(entry, &ctx->timeout_list) { |
| 2020 | struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 2021 | unsigned nxt_sq_head; |
| 2022 | long long tmp, tmp_nxt; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2023 | |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 2024 | /* |
| 2025 | * Since cached_sq_head + count - 1 can overflow, use type long |
| 2026 | * long to store it. |
| 2027 | */ |
| 2028 | tmp = (long long)ctx->cached_sq_head + count - 1; |
| 2029 | nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1; |
| 2030 | tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1; |
| 2031 | |
| 2032 | /* |
| 2033 | 		 * cached_sq_head may overflow, but it will never overflow twice |
| 2034 | 		 * while there is still a valid timeout req pending. |
| 2035 | */ |
| 2036 | if (ctx->cached_sq_head < nxt_sq_head) |
yangerkun | 8b07a65 | 2019-10-17 12:12:35 +0800 | [diff] [blame] | 2037 | tmp += UINT_MAX; |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 2038 | |
zhangyi (F) | a1f58ba | 2019-10-23 15:10:09 +0800 | [diff] [blame] | 2039 | if (tmp > tmp_nxt) |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2040 | break; |
zhangyi (F) | a1f58ba | 2019-10-23 15:10:09 +0800 | [diff] [blame] | 2041 | |
| 2042 | /* |
| 2043 | 		 * The sequence of the reqs after the inserted one (and of the |
| 2044 | 		 * inserted one itself) must be adjusted, because each timeout req consumes a slot. |
| 2045 | */ |
| 2046 | span++; |
| 2047 | nxt->sequence++; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2048 | } |
zhangyi (F) | a1f58ba | 2019-10-23 15:10:09 +0800 | [diff] [blame] | 2049 | req->sequence -= span; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2050 | list_add(&req->list, entry); |
| 2051 | spin_unlock_irq(&ctx->completion_lock); |
| 2052 | |
| 2053 | hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 2054 | req->timeout.timer.function = io_timeout_fn; |
Arnd Bergmann | bdf2007 | 2019-10-01 09:53:29 -0600 | [diff] [blame] | 2055 | hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2056 | HRTIMER_MODE_REL); |
| 2057 | return 0; |
| 2058 | } |
| 2059 | |
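| | /* |
| |  * If this request has to wait for previously submitted requests to |
| |  * complete (io_sequence_defer()), park it on ->defer_list instead of |
| |  * issuing it now. The sqe is copied because the application may reuse |
| |  * the ring entry once the submission has been consumed; -EIOCBQUEUED |
| |  * tells the caller the request has been queued for later. |
| |  */ |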
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 2060 | static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, |
| 2061 | const struct io_uring_sqe *sqe) |
| 2062 | { |
| 2063 | struct io_uring_sqe *sqe_copy; |
| 2064 | |
| 2065 | if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) |
| 2066 | return 0; |
| 2067 | |
| 2068 | sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL); |
| 2069 | if (!sqe_copy) |
| 2070 | return -EAGAIN; |
| 2071 | |
| 2072 | spin_lock_irq(&ctx->completion_lock); |
| 2073 | if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) { |
| 2074 | spin_unlock_irq(&ctx->completion_lock); |
| 2075 | kfree(sqe_copy); |
| 2076 | return 0; |
| 2077 | } |
| 2078 | |
| 2079 | memcpy(sqe_copy, sqe, sizeof(*sqe_copy)); |
| 2080 | req->submit.sqe = sqe_copy; |
| 2081 | |
| 2082 | INIT_WORK(&req->work, io_sq_wq_submit_work); |
| 2083 | list_add_tail(&req->list, &ctx->defer_list); |
| 2084 | spin_unlock_irq(&ctx->completion_lock); |
| 2085 | return -EIOCBQUEUED; |
| 2086 | } |
| 2087 | |
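| | /* |
| |  * Issue a single request: dispatch on the sqe opcode, then, on |
| |  * IORING_SETUP_IOPOLL rings, add the request to the poll list once the |
| |  * lower layer has accepted it. |
| |  */ |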
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2088 | static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2089 | const struct sqe_submit *s, struct io_kiocb **nxt, |
| 2090 | bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2091 | { |
Jens Axboe | e0c5c57 | 2019-03-12 10:18:47 -0600 | [diff] [blame] | 2092 | int ret, opcode; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2093 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2094 | req->user_data = READ_ONCE(s->sqe->user_data); |
| 2095 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2096 | if (unlikely(s->index >= ctx->sq_entries)) |
| 2097 | return -EINVAL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2098 | |
| 2099 | opcode = READ_ONCE(s->sqe->opcode); |
| 2100 | switch (opcode) { |
| 2101 | case IORING_OP_NOP: |
| 2102 | ret = io_nop(req, req->user_data); |
| 2103 | break; |
| 2104 | case IORING_OP_READV: |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2105 | if (unlikely(s->sqe->buf_index)) |
| 2106 | return -EINVAL; |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2107 | ret = io_read(req, s, nxt, force_nonblock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2108 | break; |
| 2109 | case IORING_OP_WRITEV: |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2110 | if (unlikely(s->sqe->buf_index)) |
| 2111 | return -EINVAL; |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2112 | ret = io_write(req, s, nxt, force_nonblock); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2113 | break; |
| 2114 | case IORING_OP_READ_FIXED: |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2115 | ret = io_read(req, s, nxt, force_nonblock); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2116 | break; |
| 2117 | case IORING_OP_WRITE_FIXED: |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2118 | ret = io_write(req, s, nxt, force_nonblock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2119 | break; |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2120 | case IORING_OP_FSYNC: |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2121 | ret = io_fsync(req, s->sqe, nxt, force_nonblock); |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2122 | break; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 2123 | case IORING_OP_POLL_ADD: |
| 2124 | ret = io_poll_add(req, s->sqe); |
| 2125 | break; |
| 2126 | case IORING_OP_POLL_REMOVE: |
| 2127 | ret = io_poll_remove(req, s->sqe); |
| 2128 | break; |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 2129 | case IORING_OP_SYNC_FILE_RANGE: |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2130 | ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock); |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 2131 | break; |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 2132 | case IORING_OP_SENDMSG: |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2133 | ret = io_sendmsg(req, s->sqe, nxt, force_nonblock); |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 2134 | break; |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 2135 | case IORING_OP_RECVMSG: |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2136 | ret = io_recvmsg(req, s->sqe, nxt, force_nonblock); |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 2137 | break; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 2138 | case IORING_OP_TIMEOUT: |
| 2139 | ret = io_timeout(req, s->sqe); |
| 2140 | break; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2141 | default: |
| 2142 | ret = -EINVAL; |
| 2143 | break; |
| 2144 | } |
| 2145 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2146 | if (ret) |
| 2147 | return ret; |
| 2148 | |
| 2149 | if (ctx->flags & IORING_SETUP_IOPOLL) { |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2150 | if (req->result == -EAGAIN) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2151 | return -EAGAIN; |
| 2152 | |
| 2153 | /* workqueue context doesn't hold uring_lock, grab it now */ |
| 2154 | if (s->needs_lock) |
| 2155 | mutex_lock(&ctx->uring_lock); |
| 2156 | io_iopoll_req_issued(req); |
| 2157 | if (s->needs_lock) |
| 2158 | mutex_unlock(&ctx->uring_lock); |
| 2159 | } |
| 2160 | |
| 2161 | return 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2162 | } |
| 2163 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2164 | static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx, |
| 2165 | const struct io_uring_sqe *sqe) |
| 2166 | { |
| 2167 | switch (sqe->opcode) { |
| 2168 | case IORING_OP_READV: |
| 2169 | case IORING_OP_READ_FIXED: |
| 2170 | return &ctx->pending_async[READ]; |
| 2171 | case IORING_OP_WRITEV: |
| 2172 | case IORING_OP_WRITE_FIXED: |
| 2173 | return &ctx->pending_async[WRITE]; |
| 2174 | default: |
| 2175 | return NULL; |
| 2176 | } |
| 2177 | } |
| 2178 | |
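/*
 * Fixed read/write requests use buffers that were registered (and pinned)
 * up front, so they are the only opcodes that can run without taking over
 * the submitting task's mm in the async worker.
 */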
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2179 | static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) |
| 2180 | { |
| 2181 | u8 opcode = READ_ONCE(sqe->opcode); |
| 2182 | |
| 2183 | return !(opcode == IORING_OP_READ_FIXED || |
| 2184 | opcode == IORING_OP_WRITE_FIXED); |
| 2185 | } |
| 2186 | |
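/*
 * Work handler for punted requests. It adopts the submitter's mm and
 * address limit when the sqe needs user memory, retries the request until
 * it stops returning -EAGAIN, and then keeps draining the async list so
 * requests queued behind it run from the same worker.
 */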
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2187 | static void io_sq_wq_submit_work(struct work_struct *work) |
| 2188 | { |
| 2189 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2190 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2191 | struct mm_struct *cur_mm = NULL; |
| 2192 | struct async_list *async_list; |
| 2193 | LIST_HEAD(req_list); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2194 | mm_segment_t old_fs; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2195 | int ret; |
| 2196 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2197 | async_list = io_async_list_from_sqe(ctx, req->submit.sqe); |
| 2198 | restart: |
| 2199 | do { |
| 2200 | struct sqe_submit *s = &req->submit; |
| 2201 | const struct io_uring_sqe *sqe = s->sqe; |
Jackie Liu | d0ee879 | 2019-07-31 14:39:33 +0800 | [diff] [blame] | 2202 | unsigned int flags = req->flags; |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2203 | struct io_kiocb *nxt = NULL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2204 | |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 2205 | /* Ensure we clear previously set non-block flag */ |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2206 | req->rw.ki_flags &= ~IOCB_NOWAIT; |
| 2207 | |
| 2208 | ret = 0; |
| 2209 | if (io_sqe_needs_user(sqe) && !cur_mm) { |
| 2210 | if (!mmget_not_zero(ctx->sqo_mm)) { |
| 2211 | ret = -EFAULT; |
| 2212 | } else { |
| 2213 | cur_mm = ctx->sqo_mm; |
| 2214 | use_mm(cur_mm); |
| 2215 | old_fs = get_fs(); |
| 2216 | set_fs(USER_DS); |
| 2217 | } |
| 2218 | } |
| 2219 | |
| 2220 | if (!ret) { |
| 2221 | s->has_user = cur_mm != NULL; |
| 2222 | s->needs_lock = true; |
| 2223 | do { |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2224 | ret = __io_submit_sqe(ctx, req, s, &nxt, false); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2225 | /* |
| 2226 | * We can get EAGAIN for polled IO even though |
| 2227 | * we're forcing a sync submission from here, |
| 2228 | * since we can't wait for request slots on the |
| 2229 | * block side. |
| 2230 | */ |
| 2231 | if (ret != -EAGAIN) |
| 2232 | break; |
| 2233 | cond_resched(); |
| 2234 | } while (1); |
| 2235 | } |
Jens Axboe | 817869d | 2019-04-30 14:44:05 -0600 | [diff] [blame] | 2236 | |
| 2237 | /* drop submission reference */ |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2238 | io_put_req(req, NULL); |
Jens Axboe | 817869d | 2019-04-30 14:44:05 -0600 | [diff] [blame] | 2239 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2240 | if (ret) { |
Jens Axboe | c71ffb6 | 2019-05-13 20:58:29 -0600 | [diff] [blame] | 2241 | io_cqring_add_event(ctx, sqe->user_data, ret); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2242 | io_put_req(req, NULL); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2243 | } |
| 2244 | |
| 2245 | /* async context always use a copy of the sqe */ |
| 2246 | kfree(sqe); |
| 2247 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2248 | /* if a dependent link is ready, do that as the next one */ |
| 2249 | if (!ret && nxt) { |
| 2250 | req = nxt; |
| 2251 | continue; |
| 2252 | } |
| 2253 | |
Zhengyuan Liu | f7b76ac | 2019-07-16 23:26:14 +0800 | [diff] [blame] | 2254 | 		/* reqs from the defer and link lists don't need to decrease the async cnt */
Jackie Liu | d0ee879 | 2019-07-31 14:39:33 +0800 | [diff] [blame] | 2255 | if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE)) |
Zhengyuan Liu | f7b76ac | 2019-07-16 23:26:14 +0800 | [diff] [blame] | 2256 | goto out; |
| 2257 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2258 | if (!async_list) |
| 2259 | break; |
| 2260 | if (!list_empty(&req_list)) { |
| 2261 | req = list_first_entry(&req_list, struct io_kiocb, |
| 2262 | list); |
| 2263 | list_del(&req->list); |
| 2264 | continue; |
| 2265 | } |
| 2266 | if (list_empty(&async_list->list)) |
| 2267 | break; |
| 2268 | |
| 2269 | req = NULL; |
| 2270 | spin_lock(&async_list->lock); |
| 2271 | if (list_empty(&async_list->list)) { |
| 2272 | spin_unlock(&async_list->lock); |
| 2273 | break; |
| 2274 | } |
| 2275 | list_splice_init(&async_list->list, &req_list); |
| 2276 | spin_unlock(&async_list->lock); |
| 2277 | |
| 2278 | req = list_first_entry(&req_list, struct io_kiocb, list); |
| 2279 | list_del(&req->list); |
| 2280 | } while (req); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2281 | |
| 2282 | /* |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2283 | * Rare case of racing with a submitter. If we find the count has |
| 2284 | * dropped to zero AND we have pending work items, then restart |
| 2285 | * the processing. This is a tiny race window. |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2286 | */ |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2287 | if (async_list) { |
| 2288 | ret = atomic_dec_return(&async_list->cnt); |
| 2289 | while (!ret && !list_empty(&async_list->list)) { |
| 2290 | spin_lock(&async_list->lock); |
| 2291 | atomic_inc(&async_list->cnt); |
| 2292 | list_splice_init(&async_list->list, &req_list); |
| 2293 | spin_unlock(&async_list->lock); |
| 2294 | |
| 2295 | if (!list_empty(&req_list)) { |
| 2296 | req = list_first_entry(&req_list, |
| 2297 | struct io_kiocb, list); |
| 2298 | list_del(&req->list); |
| 2299 | goto restart; |
| 2300 | } |
| 2301 | ret = atomic_dec_return(&async_list->cnt); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2302 | } |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2303 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2304 | |
Zhengyuan Liu | f7b76ac | 2019-07-16 23:26:14 +0800 | [diff] [blame] | 2305 | out: |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2306 | if (cur_mm) { |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2307 | set_fs(old_fs); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2308 | unuse_mm(cur_mm); |
| 2309 | mmput(cur_mm); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2310 | } |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2311 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2312 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2313 | /* |
| 2314 | * See if we can piggy back onto previously submitted work, that is still |
| 2315 | * running. We currently only allow this if the new request is sequential |
| 2316 | * to the previous one we punted. |
| 2317 | */ |
| 2318 | static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req) |
| 2319 | { |
Jens Axboe | 6d5d5ac | 2019-09-11 10:16:13 -0600 | [diff] [blame] | 2320 | bool ret; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2321 | |
| 2322 | if (!list) |
| 2323 | return false; |
| 2324 | if (!(req->flags & REQ_F_SEQ_PREV)) |
| 2325 | return false; |
| 2326 | if (!atomic_read(&list->cnt)) |
| 2327 | return false; |
| 2328 | |
| 2329 | ret = true; |
| 2330 | spin_lock(&list->lock); |
| 2331 | list_add_tail(&req->list, &list->list); |
Zhengyuan Liu | c0e48f9 | 2019-07-18 20:44:00 +0800 | [diff] [blame] | 2332 | /* |
| 2333 | 	 * Ensure we see any concurrent modification from io_sq_wq_submit_work()
| 2334 | */ |
| 2335 | smp_mb(); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2336 | if (!atomic_read(&list->cnt)) { |
| 2337 | list_del_init(&req->list); |
| 2338 | ret = false; |
| 2339 | } |
| 2340 | spin_unlock(&list->lock); |
| 2341 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2342 | } |
| 2343 | |
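/* NOP and POLL_REMOVE are the only opcodes that don't operate on a file. */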
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2344 | static bool io_op_needs_file(const struct io_uring_sqe *sqe) |
| 2345 | { |
| 2346 | int op = READ_ONCE(sqe->opcode); |
| 2347 | |
| 2348 | switch (op) { |
| 2349 | case IORING_OP_NOP: |
| 2350 | case IORING_OP_POLL_REMOVE: |
| 2351 | return false; |
| 2352 | default: |
| 2353 | return true; |
| 2354 | } |
| 2355 | } |
| 2356 | |
| 2357 | static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, |
| 2358 | struct io_submit_state *state, struct io_kiocb *req) |
| 2359 | { |
| 2360 | unsigned flags; |
| 2361 | int fd; |
| 2362 | |
| 2363 | flags = READ_ONCE(s->sqe->flags); |
| 2364 | fd = READ_ONCE(s->sqe->fd); |
| 2365 | |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2366 | if (flags & IOSQE_IO_DRAIN) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 2367 | req->flags |= REQ_F_IO_DRAIN; |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2368 | /* |
| 2369 | 	 * All IO needs to record the previous position; when LINK is combined
| 2370 | 	 * with DRAIN, it is used to mark the position of the first IO in the
| 2371 | 	 * link list.
| 2372 | */ |
| 2373 | req->sequence = s->sequence; |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 2374 | |
Jens Axboe | 60c112b | 2019-06-21 10:20:18 -0600 | [diff] [blame] | 2375 | if (!io_op_needs_file(s->sqe)) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2376 | return 0; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2377 | |
| 2378 | if (flags & IOSQE_FIXED_FILE) { |
| 2379 | if (unlikely(!ctx->user_files || |
| 2380 | (unsigned) fd >= ctx->nr_user_files)) |
| 2381 | return -EBADF; |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 2382 | if (!ctx->user_files[fd]) |
| 2383 | return -EBADF; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2384 | req->file = ctx->user_files[fd]; |
| 2385 | req->flags |= REQ_F_FIXED_FILE; |
| 2386 | } else { |
| 2387 | if (s->needs_fixed_file) |
| 2388 | return -EBADF; |
| 2389 | req->file = io_file_get(state, fd); |
| 2390 | if (unlikely(!req->file)) |
| 2391 | return -EBADF; |
| 2392 | } |
| 2393 | |
| 2394 | return 0; |
| 2395 | } |
| 2396 | |
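/*
 * Issue an sqe once in non-blocking mode. On -EAGAIN the sqe is copied and
 * punted to the async workqueue (or piggy-backed on an already running
 * worker); otherwise the submission reference is dropped and, on error, a
 * completion event is posted and the final reference dropped as well.
 */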
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2397 | static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2398 | struct sqe_submit *s) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2399 | { |
Jens Axboe | e0c5c57 | 2019-03-12 10:18:47 -0600 | [diff] [blame] | 2400 | int ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2401 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2402 | ret = __io_submit_sqe(ctx, req, s, NULL, true); |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 2403 | |
| 2404 | /* |
| 2405 | * We async punt it if the file wasn't marked NOWAIT, or if the file |
| 2406 | * doesn't support non-blocking read/write attempts |
| 2407 | */ |
| 2408 | if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || |
| 2409 | (req->flags & REQ_F_MUST_PUNT))) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2410 | struct io_uring_sqe *sqe_copy; |
| 2411 | |
Jackie Liu | 954dab1 | 2019-09-18 10:37:52 +0800 | [diff] [blame] | 2412 | sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2413 | if (sqe_copy) { |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2414 | struct async_list *list; |
| 2415 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2416 | s->sqe = sqe_copy; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2417 | memcpy(&req->submit, s, sizeof(*s)); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2418 | list = io_async_list_from_sqe(ctx, s->sqe); |
| 2419 | if (!io_add_to_prev_work(list, req)) { |
| 2420 | if (list) |
| 2421 | atomic_inc(&list->cnt); |
| 2422 | INIT_WORK(&req->work, io_sq_wq_submit_work); |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 2423 | io_queue_async_work(ctx, req); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2424 | } |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 2425 | |
| 2426 | /* |
| 2427 | * Queued up for async execution, worker will release |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2428 | * submit reference when the iocb is actually submitted. |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 2429 | */ |
| 2430 | return 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2431 | } |
| 2432 | } |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 2433 | |
| 2434 | /* drop submission reference */ |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2435 | io_put_req(req, NULL); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 2436 | |
| 2437 | /* and drop final reference, if we failed */ |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2438 | if (ret) { |
| 2439 | io_cqring_add_event(ctx, req->user_data, ret); |
| 2440 | if (req->flags & REQ_F_LINK) |
| 2441 | req->flags |= REQ_F_FAIL_LINK; |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2442 | io_put_req(req, NULL); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2443 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2444 | |
| 2445 | return ret; |
| 2446 | } |
| 2447 | |
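/*
 * Queue an sqe for execution: defer it if drain ordering requires that,
 * otherwise issue it directly through __io_queue_sqe().
 */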
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2448 | static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2449 | struct sqe_submit *s) |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2450 | { |
| 2451 | int ret; |
| 2452 | |
| 2453 | ret = io_req_defer(ctx, req, s->sqe); |
| 2454 | if (ret) { |
| 2455 | if (ret != -EIOCBQUEUED) { |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2456 | io_free_req(req, NULL); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2457 | io_cqring_add_event(ctx, s->sqe->user_data, ret); |
| 2458 | } |
| 2459 | return 0; |
| 2460 | } |
| 2461 | |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2462 | return __io_queue_sqe(ctx, req, s); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2463 | } |
| 2464 | |
| 2465 | static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req, |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2466 | struct sqe_submit *s, struct io_kiocb *shadow) |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2467 | { |
| 2468 | int ret; |
| 2469 | int need_submit = false; |
| 2470 | |
| 2471 | if (!shadow) |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2472 | return io_queue_sqe(ctx, req, s); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2473 | |
| 2474 | /* |
| 2475 | 	 * Mark the first IO in the link list as DRAIN, so all the following
| 2476 | 	 * IOs enter the defer list. All IO ahead of the link needs to be
| 2477 | 	 * completed before the link list starts running.
| 2478 | */ |
| 2479 | req->flags |= REQ_F_IO_DRAIN; |
| 2480 | ret = io_req_defer(ctx, req, s->sqe); |
| 2481 | if (ret) { |
| 2482 | if (ret != -EIOCBQUEUED) { |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2483 | io_free_req(req, NULL); |
Pavel Begunkov | 7b20238 | 2019-10-27 22:10:36 +0300 | [diff] [blame] | 2484 | __io_free_req(shadow); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2485 | io_cqring_add_event(ctx, s->sqe->user_data, ret); |
| 2486 | return 0; |
| 2487 | } |
| 2488 | } else { |
| 2489 | /* |
| 2490 | 		 * ret == 0 means that all IOs in front of the link IO have
| 2491 | 		 * already completed, so queue the link head now.
| 2492 | */ |
| 2493 | need_submit = true; |
| 2494 | } |
| 2495 | |
| 2496 | /* Insert shadow req to defer_list, blocking next IOs */ |
| 2497 | spin_lock_irq(&ctx->completion_lock); |
| 2498 | list_add_tail(&shadow->list, &ctx->defer_list); |
| 2499 | spin_unlock_irq(&ctx->completion_lock); |
| 2500 | |
| 2501 | if (need_submit) |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2502 | return __io_queue_sqe(ctx, req, s); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2503 | |
| 2504 | return 0; |
| 2505 | } |
| 2506 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2507 | #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK) |
| 2508 | |
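/*
 * Prepare a single sqe: validate its flags, grab a request and the target
 * file, then either append it to the current link chain, start a new chain,
 * or queue it for execution right away.
 */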
| 2509 | static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2510 | struct io_submit_state *state, struct io_kiocb **link) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2511 | { |
| 2512 | struct io_uring_sqe *sqe_copy; |
| 2513 | struct io_kiocb *req; |
| 2514 | int ret; |
| 2515 | |
| 2516 | /* enforce forwards compatibility on users */ |
| 2517 | if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) { |
| 2518 | ret = -EINVAL; |
| 2519 | goto err; |
| 2520 | } |
| 2521 | |
| 2522 | req = io_get_req(ctx, state); |
| 2523 | if (unlikely(!req)) { |
| 2524 | ret = -EAGAIN; |
| 2525 | goto err; |
| 2526 | } |
| 2527 | |
| 2528 | ret = io_req_set_file(ctx, s, state, req); |
| 2529 | if (unlikely(ret)) { |
| 2530 | err_req: |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2531 | io_free_req(req, NULL); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2532 | err: |
| 2533 | io_cqring_add_event(ctx, s->sqe->user_data, ret); |
| 2534 | return; |
| 2535 | } |
| 2536 | |
Pavel Begunkov | 84d55dc | 2019-10-25 12:31:29 +0300 | [diff] [blame] | 2537 | req->user_data = s->sqe->user_data; |
| 2538 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2539 | /* |
| 2540 | * If we already have a head request, queue this one for async |
| 2541 | * submittal once the head completes. If we don't have a head but |
| 2542 | * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be |
| 2543 | * submitted sync once the chain is complete. If none of those |
| 2544 | * conditions are true (normal request), then just queue it. |
| 2545 | */ |
| 2546 | if (*link) { |
| 2547 | struct io_kiocb *prev = *link; |
| 2548 | |
| 2549 | sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); |
| 2550 | if (!sqe_copy) { |
| 2551 | ret = -EAGAIN; |
| 2552 | goto err_req; |
| 2553 | } |
| 2554 | |
| 2555 | s->sqe = sqe_copy; |
| 2556 | memcpy(&req->submit, s, sizeof(*s)); |
| 2557 | list_add_tail(&req->list, &prev->link_list); |
| 2558 | } else if (s->sqe->flags & IOSQE_IO_LINK) { |
| 2559 | req->flags |= REQ_F_LINK; |
| 2560 | |
| 2561 | memcpy(&req->submit, s, sizeof(*s)); |
| 2562 | INIT_LIST_HEAD(&req->link_list); |
| 2563 | *link = req; |
| 2564 | } else { |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2565 | io_queue_sqe(ctx, req, s); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2566 | } |
| 2567 | } |
| 2568 | |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 2569 | /* |
| 2570 | * Batched submission is done, ensure local IO is flushed out. |
| 2571 | */ |
| 2572 | static void io_submit_state_end(struct io_submit_state *state) |
| 2573 | { |
| 2574 | blk_finish_plug(&state->plug); |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 2575 | io_file_put(state); |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 2576 | if (state->free_reqs) |
| 2577 | kmem_cache_free_bulk(req_cachep, state->free_reqs, |
| 2578 | &state->reqs[state->cur_req]); |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 2579 | } |
| 2580 | |
| 2581 | /* |
| 2582 | * Start submission side cache. |
| 2583 | */ |
| 2584 | static void io_submit_state_start(struct io_submit_state *state, |
| 2585 | struct io_ring_ctx *ctx, unsigned max_ios) |
| 2586 | { |
| 2587 | blk_start_plug(&state->plug); |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 2588 | state->free_reqs = 0; |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 2589 | state->file = NULL; |
| 2590 | state->ios_left = max_ios; |
| 2591 | } |
| 2592 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2593 | static void io_commit_sqring(struct io_ring_ctx *ctx) |
| 2594 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2595 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2596 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2597 | if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2598 | /* |
| 2599 | * Ensure any loads from the SQEs are done at this point, |
| 2600 | * since once we write the new head, the application could |
| 2601 | * write new data to them. |
| 2602 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2603 | smp_store_release(&rings->sq.head, ctx->cached_sq_head); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2604 | } |
| 2605 | } |
| 2606 | |
| 2607 | /* |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2608 | * Fetch an sqe, if one is available. Note that s->sqe will point to memory |
| 2609 | * that is mapped by userspace. This means that care needs to be taken to |
| 2610 | * ensure that reads are stable, as we cannot rely on userspace always |
| 2611 | * being a good citizen. If members of the sqe are validated and then later |
| 2612 | * used, it's important that those reads are done through READ_ONCE() to |
| 2613 | * prevent a re-load down the line. |
| 2614 | */ |
| 2615 | static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) |
| 2616 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2617 | struct io_rings *rings = ctx->rings; |
| 2618 | u32 *sq_array = ctx->sq_array; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2619 | unsigned head; |
| 2620 | |
| 2621 | /* |
| 2622 | * The cached sq head (or cq tail) serves two purposes: |
| 2623 | * |
| 2624 | 	 * 1) allows us to batch the cost of updating the user visible
| 2625 | 	 *    head.
| 2626 | * 2) allows the kernel side to track the head on its own, even |
| 2627 | * though the application is the one updating it. |
| 2628 | */ |
| 2629 | head = ctx->cached_sq_head; |
Stefan Bühler | e523a29 | 2019-04-19 11:57:44 +0200 | [diff] [blame] | 2630 | /* make sure SQ entry isn't read before tail */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2631 | if (head == smp_load_acquire(&rings->sq.tail)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2632 | return false; |
| 2633 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2634 | head = READ_ONCE(sq_array[head & ctx->sq_mask]); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2635 | if (head < ctx->sq_entries) { |
| 2636 | s->index = head; |
| 2637 | s->sqe = &ctx->sq_sqes[head]; |
Jackie Liu | 8776f3f | 2019-09-09 20:50:39 +0800 | [diff] [blame] | 2638 | s->sequence = ctx->cached_sq_head; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2639 | ctx->cached_sq_head++; |
| 2640 | return true; |
| 2641 | } |
| 2642 | |
| 2643 | /* drop invalid entries */ |
| 2644 | ctx->cached_sq_head++; |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 2645 | ctx->cached_sq_dropped++; |
| 2646 | WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2647 | return false; |
| 2648 | } |
| 2649 | |
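/*
 * Submit up to 'nr' sqes on behalf of the SQPOLL thread. Link chains are
 * collected and queued as a unit; a drain inside a chain gets a shadow
 * request placed on the defer list so ordering is preserved.
 */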
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2650 | static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, |
| 2651 | bool has_user, bool mm_fault) |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2652 | { |
| 2653 | struct io_submit_state state, *statep = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2654 | struct io_kiocb *link = NULL; |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2655 | struct io_kiocb *shadow_req = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2656 | bool prev_was_link = false; |
| 2657 | int i, submitted = 0; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2658 | |
| 2659 | if (nr > IO_PLUG_THRESHOLD) { |
| 2660 | io_submit_state_start(&state, ctx, nr); |
| 2661 | statep = &state; |
| 2662 | } |
| 2663 | |
| 2664 | for (i = 0; i < nr; i++) { |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2665 | struct sqe_submit s; |
| 2666 | |
| 2667 | if (!io_get_sqring(ctx, &s)) |
| 2668 | break; |
| 2669 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2670 | /* |
| 2671 | * If previous wasn't linked and we have a linked command, |
| 2672 | * that's the end of the chain. Submit the previous link. |
| 2673 | */ |
| 2674 | if (!prev_was_link && link) { |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2675 | io_queue_link_head(ctx, link, &link->submit, shadow_req); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2676 | link = NULL; |
Jackie Liu | 5f5ad9c | 2019-09-18 10:37:53 +0800 | [diff] [blame] | 2677 | shadow_req = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2678 | } |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2679 | prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2680 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2681 | if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2682 | if (!shadow_req) { |
| 2683 | shadow_req = io_get_req(ctx, NULL); |
Jackie Liu | a1041c2 | 2019-09-18 17:25:52 +0800 | [diff] [blame] | 2684 | if (unlikely(!shadow_req)) |
| 2685 | goto out; |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2686 | shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN); |
| 2687 | refcount_dec(&shadow_req->refs); |
| 2688 | } |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2689 | shadow_req->sequence = s.sequence; |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2690 | } |
| 2691 | |
Jackie Liu | a1041c2 | 2019-09-18 17:25:52 +0800 | [diff] [blame] | 2692 | out: |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2693 | if (unlikely(mm_fault)) { |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2694 | io_cqring_add_event(ctx, s.sqe->user_data, |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2695 | -EFAULT); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2696 | } else { |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2697 | s.has_user = has_user; |
| 2698 | s.needs_lock = true; |
| 2699 | s.needs_fixed_file = true; |
| 2700 | io_submit_sqe(ctx, &s, statep, &link); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2701 | submitted++; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2702 | } |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2703 | } |
| 2704 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2705 | if (link) |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2706 | io_queue_link_head(ctx, link, &link->submit, shadow_req); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2707 | if (statep) |
| 2708 | io_submit_state_end(&state); |
| 2709 | |
| 2710 | return submitted; |
| 2711 | } |
| 2712 | |
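/*
 * The kernel side SQ poll thread (IORING_SETUP_SQPOLL). It keeps consuming
 * sqes while work is pending, spins for sq_thread_idle once the ring runs
 * dry, and then sets IORING_SQ_NEED_WAKEUP and sleeps until userspace calls
 * io_uring_enter() to wake it up.
 */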
| 2713 | static int io_sq_thread(void *data) |
| 2714 | { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2715 | struct io_ring_ctx *ctx = data; |
| 2716 | struct mm_struct *cur_mm = NULL; |
| 2717 | mm_segment_t old_fs; |
| 2718 | DEFINE_WAIT(wait); |
| 2719 | unsigned inflight; |
| 2720 | unsigned long timeout; |
| 2721 | |
Jackie Liu | a4c0b3d | 2019-07-08 13:41:12 +0800 | [diff] [blame] | 2722 | complete(&ctx->sqo_thread_started); |
| 2723 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2724 | old_fs = get_fs(); |
| 2725 | set_fs(USER_DS); |
| 2726 | |
| 2727 | timeout = inflight = 0; |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 2728 | while (!kthread_should_park()) { |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2729 | bool mm_fault = false; |
| 2730 | unsigned int to_submit; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2731 | |
| 2732 | if (inflight) { |
| 2733 | unsigned nr_events = 0; |
| 2734 | |
| 2735 | if (ctx->flags & IORING_SETUP_IOPOLL) { |
Jens Axboe | 2b2ed97 | 2019-10-25 10:06:15 -0600 | [diff] [blame] | 2736 | /* |
| 2737 | * inflight is the count of the maximum possible |
| 2738 | * entries we submitted, but it can be smaller |
| 2739 | * if we dropped some of them. If we don't have |
| 2740 | * poll entries available, then we know that we |
| 2741 | * have nothing left to poll for. Reset the |
| 2742 | * inflight count to zero in that case. |
| 2743 | */ |
| 2744 | mutex_lock(&ctx->uring_lock); |
| 2745 | if (!list_empty(&ctx->poll_list)) |
| 2746 | __io_iopoll_check(ctx, &nr_events, 0); |
| 2747 | else |
| 2748 | inflight = 0; |
| 2749 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2750 | } else { |
| 2751 | /* |
| 2752 | * Normal IO, just pretend everything completed. |
| 2753 | * We don't have to poll completions for that. |
| 2754 | */ |
| 2755 | nr_events = inflight; |
| 2756 | } |
| 2757 | |
| 2758 | inflight -= nr_events; |
| 2759 | if (!inflight) |
| 2760 | timeout = jiffies + ctx->sq_thread_idle; |
| 2761 | } |
| 2762 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2763 | to_submit = io_sqring_entries(ctx); |
| 2764 | if (!to_submit) { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2765 | /* |
| 2766 | * We're polling. If we're within the defined idle |
| 2767 | * period, then let us spin without work before going |
| 2768 | * to sleep. |
| 2769 | */ |
| 2770 | if (inflight || !time_after(jiffies, timeout)) { |
Jens Axboe | 9831a90 | 2019-09-19 09:48:55 -0600 | [diff] [blame] | 2771 | cond_resched(); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2772 | continue; |
| 2773 | } |
| 2774 | |
| 2775 | /* |
| 2776 | * Drop cur_mm before scheduling, we can't hold it for |
| 2777 | * long periods (or over schedule()). Do this before |
| 2778 | * adding ourselves to the waitqueue, as the unuse/drop |
| 2779 | * may sleep. |
| 2780 | */ |
| 2781 | if (cur_mm) { |
| 2782 | unuse_mm(cur_mm); |
| 2783 | mmput(cur_mm); |
| 2784 | cur_mm = NULL; |
| 2785 | } |
| 2786 | |
| 2787 | prepare_to_wait(&ctx->sqo_wait, &wait, |
| 2788 | TASK_INTERRUPTIBLE); |
| 2789 | |
| 2790 | /* Tell userspace we may need a wakeup call */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2791 | ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; |
Stefan Bühler | 0d7bae6 | 2019-04-19 11:57:45 +0200 | [diff] [blame] | 2792 | /* make sure to read SQ tail after writing flags */ |
| 2793 | smp_mb(); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2794 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2795 | to_submit = io_sqring_entries(ctx); |
| 2796 | if (!to_submit) { |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 2797 | if (kthread_should_park()) { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2798 | finish_wait(&ctx->sqo_wait, &wait); |
| 2799 | break; |
| 2800 | } |
| 2801 | if (signal_pending(current)) |
| 2802 | flush_signals(current); |
| 2803 | schedule(); |
| 2804 | finish_wait(&ctx->sqo_wait, &wait); |
| 2805 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2806 | ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2807 | continue; |
| 2808 | } |
| 2809 | finish_wait(&ctx->sqo_wait, &wait); |
| 2810 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2811 | ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2812 | } |
| 2813 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2814 | /* Unless all new commands are FIXED regions, grab mm */ |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2815 | if (!cur_mm) { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2816 | mm_fault = !mmget_not_zero(ctx->sqo_mm); |
| 2817 | if (!mm_fault) { |
| 2818 | use_mm(ctx->sqo_mm); |
| 2819 | cur_mm = ctx->sqo_mm; |
| 2820 | } |
| 2821 | } |
| 2822 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2823 | to_submit = min(to_submit, ctx->sq_entries); |
| 2824 | inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL, |
| 2825 | mm_fault); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2826 | |
| 2827 | /* Commit SQ ring head once we've consumed all SQEs */ |
| 2828 | io_commit_sqring(ctx); |
| 2829 | } |
| 2830 | |
| 2831 | set_fs(old_fs); |
| 2832 | if (cur_mm) { |
| 2833 | unuse_mm(cur_mm); |
| 2834 | mmput(cur_mm); |
| 2835 | } |
Jens Axboe | 0605863 | 2019-04-13 09:26:03 -0600 | [diff] [blame] | 2836 | |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 2837 | kthread_parkme(); |
Jens Axboe | 0605863 | 2019-04-13 09:26:03 -0600 | [diff] [blame] | 2838 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2839 | return 0; |
| 2840 | } |
| 2841 | |
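/*
 * Inline submission path for io_uring_enter(): the same link and drain
 * handling as the SQPOLL thread, but run directly in the context of the
 * submitting task.
 */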
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2842 | static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2843 | { |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 2844 | struct io_submit_state state, *statep = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2845 | struct io_kiocb *link = NULL; |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2846 | struct io_kiocb *shadow_req = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2847 | bool prev_was_link = false; |
Jens Axboe | 5c8b0b5 | 2019-04-30 10:16:07 -0600 | [diff] [blame] | 2848 | int i, submit = 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2849 | |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 2850 | if (to_submit > IO_PLUG_THRESHOLD) { |
| 2851 | io_submit_state_start(&state, ctx, to_submit); |
| 2852 | statep = &state; |
| 2853 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2854 | |
| 2855 | for (i = 0; i < to_submit; i++) { |
| 2856 | struct sqe_submit s; |
| 2857 | |
| 2858 | if (!io_get_sqring(ctx, &s)) |
| 2859 | break; |
| 2860 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2861 | /* |
| 2862 | * If previous wasn't linked and we have a linked command, |
| 2863 | * that's the end of the chain. Submit the previous link. |
| 2864 | */ |
| 2865 | if (!prev_was_link && link) { |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2866 | io_queue_link_head(ctx, link, &link->submit, shadow_req); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2867 | link = NULL; |
Jackie Liu | 5f5ad9c | 2019-09-18 10:37:53 +0800 | [diff] [blame] | 2868 | shadow_req = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2869 | } |
| 2870 | prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0; |
| 2871 | |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2872 | if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { |
| 2873 | if (!shadow_req) { |
| 2874 | shadow_req = io_get_req(ctx, NULL); |
Jackie Liu | a1041c2 | 2019-09-18 17:25:52 +0800 | [diff] [blame] | 2875 | if (unlikely(!shadow_req)) |
| 2876 | goto out; |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2877 | shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN); |
| 2878 | refcount_dec(&shadow_req->refs); |
| 2879 | } |
| 2880 | shadow_req->sequence = s.sequence; |
| 2881 | } |
| 2882 | |
Jackie Liu | a1041c2 | 2019-09-18 17:25:52 +0800 | [diff] [blame] | 2883 | out: |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2884 | s.has_user = true; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2885 | s.needs_lock = false; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2886 | s.needs_fixed_file = false; |
Jens Axboe | 5c8b0b5 | 2019-04-30 10:16:07 -0600 | [diff] [blame] | 2887 | submit++; |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2888 | io_submit_sqe(ctx, &s, statep, &link); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2889 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2890 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2891 | if (link) |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 2892 | io_queue_link_head(ctx, link, &link->submit, shadow_req); |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 2893 | if (statep) |
| 2894 | io_submit_state_end(statep); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2895 | |
Pavel Begunkov | 935d1e4 | 2019-10-25 12:31:31 +0300 | [diff] [blame] | 2896 | io_commit_sqring(ctx); |
| 2897 | |
Jens Axboe | 5c8b0b5 | 2019-04-30 10:16:07 -0600 | [diff] [blame] | 2898 | return submit; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2899 | } |
| 2900 | |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2901 | struct io_wait_queue { |
| 2902 | struct wait_queue_entry wq; |
| 2903 | struct io_ring_ctx *ctx; |
| 2904 | unsigned to_wait; |
| 2905 | unsigned nr_timeouts; |
| 2906 | }; |
| 2907 | |
| 2908 | static inline bool io_should_wake(struct io_wait_queue *iowq) |
| 2909 | { |
| 2910 | struct io_ring_ctx *ctx = iowq->ctx; |
| 2911 | |
| 2912 | /* |
| 2913 | 	 * Wake up if we have enough events, or if a timeout occurred since we
| 2914 | * started waiting. For timeouts, we always want to return to userspace, |
| 2915 | * regardless of event count. |
| 2916 | */ |
| 2917 | return io_cqring_events(ctx->rings) >= iowq->to_wait || |
| 2918 | atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; |
| 2919 | } |
| 2920 | |
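/*
 * Wait queue callback for completion waiters: refuse the wakeup unless
 * enough events (or a timeout) have arrived, so the waiting task isn't
 * woken for every single CQE that gets posted.
 */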
| 2921 | static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, |
| 2922 | int wake_flags, void *key) |
| 2923 | { |
| 2924 | struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, |
| 2925 | wq); |
| 2926 | |
| 2927 | if (!io_should_wake(iowq)) |
| 2928 | return -1; |
| 2929 | |
| 2930 | return autoremove_wake_function(curr, mode, wake_flags, key); |
| 2931 | } |
| 2932 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2933 | /* |
| 2934 | * Wait until events become available, if we don't already have some. The |
| 2935 | * application must reap them itself, as they reside on the shared cq ring. |
| 2936 | */ |
| 2937 | static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, |
| 2938 | const sigset_t __user *sig, size_t sigsz) |
| 2939 | { |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2940 | struct io_wait_queue iowq = { |
| 2941 | .wq = { |
| 2942 | .private = current, |
| 2943 | .func = io_wake_function, |
| 2944 | .entry = LIST_HEAD_INIT(iowq.wq.entry), |
| 2945 | }, |
| 2946 | .ctx = ctx, |
| 2947 | .to_wait = min_events, |
| 2948 | }; |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2949 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2950 | int ret; |
| 2951 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2952 | if (io_cqring_events(rings) >= min_events) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2953 | return 0; |
| 2954 | |
| 2955 | if (sig) { |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 2956 | #ifdef CONFIG_COMPAT |
| 2957 | if (in_compat_syscall()) |
| 2958 | ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, |
Oleg Nesterov | b772434 | 2019-07-16 16:29:53 -0700 | [diff] [blame] | 2959 | sigsz); |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 2960 | else |
| 2961 | #endif |
Oleg Nesterov | b772434 | 2019-07-16 16:29:53 -0700 | [diff] [blame] | 2962 | ret = set_user_sigmask(sig, sigsz); |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 2963 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2964 | if (ret) |
| 2965 | return ret; |
| 2966 | } |
| 2967 | |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2968 | ret = 0; |
| 2969 | iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); |
| 2970 | do { |
| 2971 | prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, |
| 2972 | TASK_INTERRUPTIBLE); |
| 2973 | if (io_should_wake(&iowq)) |
| 2974 | break; |
| 2975 | schedule(); |
| 2976 | if (signal_pending(current)) { |
| 2977 | ret = -ERESTARTSYS; |
| 2978 | break; |
| 2979 | } |
| 2980 | } while (1); |
| 2981 | finish_wait(&ctx->wait, &iowq.wq); |
| 2982 | |
Oleg Nesterov | b772434 | 2019-07-16 16:29:53 -0700 | [diff] [blame] | 2983 | restore_saved_sigmask_unless(ret == -ERESTARTSYS); |
Oleg Nesterov | 97abc88 | 2019-06-28 12:06:50 -0700 | [diff] [blame] | 2984 | if (ret == -ERESTARTSYS) |
| 2985 | ret = -EINTR; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2986 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2987 | return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2988 | } |
| 2989 | |
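/*
 * Release the registered file set. With CONFIG_UNIX the files are owned by
 * skbs queued on the ring socket, so flushing its receive queue drops them;
 * otherwise each file is put directly.
 */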
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 2990 | static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) |
| 2991 | { |
| 2992 | #if defined(CONFIG_UNIX) |
| 2993 | if (ctx->ring_sock) { |
| 2994 | struct sock *sock = ctx->ring_sock->sk; |
| 2995 | struct sk_buff *skb; |
| 2996 | |
| 2997 | while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL) |
| 2998 | kfree_skb(skb); |
| 2999 | } |
| 3000 | #else |
| 3001 | int i; |
| 3002 | |
| 3003 | for (i = 0; i < ctx->nr_user_files; i++) |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3004 | if (ctx->user_files[i]) |
| 3005 | fput(ctx->user_files[i]); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3006 | #endif |
| 3007 | } |
| 3008 | |
| 3009 | static int io_sqe_files_unregister(struct io_ring_ctx *ctx) |
| 3010 | { |
| 3011 | if (!ctx->user_files) |
| 3012 | return -ENXIO; |
| 3013 | |
| 3014 | __io_sqe_files_unregister(ctx); |
| 3015 | kfree(ctx->user_files); |
| 3016 | ctx->user_files = NULL; |
| 3017 | ctx->nr_user_files = 0; |
| 3018 | return 0; |
| 3019 | } |
| 3020 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3021 | static void io_sq_thread_stop(struct io_ring_ctx *ctx) |
| 3022 | { |
| 3023 | if (ctx->sqo_thread) { |
Jackie Liu | a4c0b3d | 2019-07-08 13:41:12 +0800 | [diff] [blame] | 3024 | wait_for_completion(&ctx->sqo_thread_started); |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 3025 | /* |
| 3026 | 		 * The park is a bit of a work-around; without it we get
| 3027 | * warning spews on shutdown with SQPOLL set and affinity |
| 3028 | * set to a single CPU. |
| 3029 | */ |
Jens Axboe | 0605863 | 2019-04-13 09:26:03 -0600 | [diff] [blame] | 3030 | kthread_park(ctx->sqo_thread); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3031 | kthread_stop(ctx->sqo_thread); |
| 3032 | ctx->sqo_thread = NULL; |
| 3033 | } |
| 3034 | } |
| 3035 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3036 | static void io_finish_async(struct io_ring_ctx *ctx) |
| 3037 | { |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 3038 | int i; |
| 3039 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3040 | io_sq_thread_stop(ctx); |
| 3041 | |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 3042 | for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) { |
| 3043 | if (ctx->sqo_wq[i]) { |
| 3044 | destroy_workqueue(ctx->sqo_wq[i]); |
| 3045 | ctx->sqo_wq[i] = NULL; |
| 3046 | } |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3047 | } |
| 3048 | } |
| 3049 | |
| 3050 | #if defined(CONFIG_UNIX) |
| 3051 | static void io_destruct_skb(struct sk_buff *skb) |
| 3052 | { |
| 3053 | struct io_ring_ctx *ctx = skb->sk->sk_user_data; |
Jens Axboe | 8a99734 | 2019-10-09 14:40:13 -0600 | [diff] [blame] | 3054 | int i; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3055 | |
Jens Axboe | 8a99734 | 2019-10-09 14:40:13 -0600 | [diff] [blame] | 3056 | for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) |
| 3057 | if (ctx->sqo_wq[i]) |
| 3058 | flush_workqueue(ctx->sqo_wq[i]); |
| 3059 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3060 | unix_destruct_scm(skb); |
| 3061 | } |
| 3062 | |
| 3063 | /* |
| 3064 | * Ensure the UNIX gc is aware of our file set, so we are certain that |
| 3065 | * the io_uring can be safely unregistered on process exit, even if we have |
| 3066 | * loops in the file referencing. |
| 3067 | */ |
| 3068 | static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) |
| 3069 | { |
| 3070 | struct sock *sk = ctx->ring_sock->sk; |
| 3071 | struct scm_fp_list *fpl; |
| 3072 | struct sk_buff *skb; |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3073 | int i, nr_files; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3074 | |
| 3075 | if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { |
| 3076 | unsigned long inflight = ctx->user->unix_inflight + nr; |
| 3077 | |
| 3078 | if (inflight > task_rlimit(current, RLIMIT_NOFILE)) |
| 3079 | return -EMFILE; |
| 3080 | } |
| 3081 | |
| 3082 | fpl = kzalloc(sizeof(*fpl), GFP_KERNEL); |
| 3083 | if (!fpl) |
| 3084 | return -ENOMEM; |
| 3085 | |
| 3086 | skb = alloc_skb(0, GFP_KERNEL); |
| 3087 | if (!skb) { |
| 3088 | kfree(fpl); |
| 3089 | return -ENOMEM; |
| 3090 | } |
| 3091 | |
| 3092 | skb->sk = sk; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3093 | |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3094 | nr_files = 0; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3095 | fpl->user = get_uid(ctx->user); |
| 3096 | for (i = 0; i < nr; i++) { |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3097 | if (!ctx->user_files[i + offset]) |
| 3098 | continue; |
| 3099 | fpl->fp[nr_files] = get_file(ctx->user_files[i + offset]); |
| 3100 | unix_inflight(fpl->user, fpl->fp[nr_files]); |
| 3101 | nr_files++; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3102 | } |
| 3103 | |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3104 | if (nr_files) { |
| 3105 | fpl->max = SCM_MAX_FD; |
| 3106 | fpl->count = nr_files; |
| 3107 | UNIXCB(skb).fp = fpl; |
| 3108 | skb->destructor = io_destruct_skb; |
| 3109 | refcount_add(skb->truesize, &sk->sk_wmem_alloc); |
| 3110 | skb_queue_head(&sk->sk_receive_queue, skb); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3111 | |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3112 | for (i = 0; i < nr_files; i++) |
| 3113 | fput(fpl->fp[i]); |
| 3114 | } else { |
| 3115 | kfree_skb(skb); |
| 3116 | kfree(fpl); |
| 3117 | } |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3118 | |
| 3119 | return 0; |
| 3120 | } |
| 3121 | |
| 3122 | /* |
| 3123 | * If UNIX sockets are enabled, fd passing can cause a reference cycle which |
| 3124 | * causes regular reference counting to break down. We rely on the UNIX |
| 3125 | * garbage collection to take care of this problem for us. |
| 3126 | */ |
| 3127 | static int io_sqe_files_scm(struct io_ring_ctx *ctx) |
| 3128 | { |
| 3129 | unsigned left, total; |
| 3130 | int ret = 0; |
| 3131 | |
| 3132 | total = 0; |
| 3133 | left = ctx->nr_user_files; |
| 3134 | while (left) { |
| 3135 | unsigned this_files = min_t(unsigned, left, SCM_MAX_FD); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3136 | |
| 3137 | ret = __io_sqe_files_scm(ctx, this_files, total); |
| 3138 | if (ret) |
| 3139 | break; |
| 3140 | left -= this_files; |
| 3141 | total += this_files; |
| 3142 | } |
| 3143 | |
| 3144 | if (!ret) |
| 3145 | return 0; |
| 3146 | |
| 3147 | while (total < ctx->nr_user_files) { |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3148 | if (ctx->user_files[total]) |
| 3149 | fput(ctx->user_files[total]); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3150 | total++; |
| 3151 | } |
| 3152 | |
| 3153 | return ret; |
| 3154 | } |
| 3155 | #else |
| 3156 | static int io_sqe_files_scm(struct io_ring_ctx *ctx) |
| 3157 | { |
| 3158 | return 0; |
| 3159 | } |
| 3160 | #endif |
| 3161 | |
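/*
 * Register a fixed file set from a userspace array of fds. An entry of -1
 * leaves a sparse slot; submitting against a sparse slot fails with -EBADF.
 */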
| 3162 | static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, |
| 3163 | unsigned nr_args) |
| 3164 | { |
| 3165 | __s32 __user *fds = (__s32 __user *) arg; |
| 3166 | int fd, ret = 0; |
| 3167 | unsigned i; |
| 3168 | |
| 3169 | if (ctx->user_files) |
| 3170 | return -EBUSY; |
| 3171 | if (!nr_args) |
| 3172 | return -EINVAL; |
| 3173 | if (nr_args > IORING_MAX_FIXED_FILES) |
| 3174 | return -EMFILE; |
| 3175 | |
| 3176 | ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL); |
| 3177 | if (!ctx->user_files) |
| 3178 | return -ENOMEM; |
| 3179 | |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3180 | for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3181 | ret = -EFAULT; |
| 3182 | if (copy_from_user(&fd, &fds[i], sizeof(fd))) |
| 3183 | break; |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3184 | /* allow sparse sets */ |
| 3185 | if (fd == -1) { |
| 3186 | ret = 0; |
| 3187 | continue; |
| 3188 | } |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3189 | |
| 3190 | ctx->user_files[i] = fget(fd); |
| 3191 | |
| 3192 | ret = -EBADF; |
| 3193 | if (!ctx->user_files[i]) |
| 3194 | break; |
| 3195 | /* |
| 3196 | * Don't allow io_uring instances to be registered. If UNIX |
| 3197 | * isn't enabled, then this causes a reference cycle and this |
| 3198 | * instance can never get freed. If UNIX is enabled we'll |
| 3199 | * handle it just fine, but there's still no point in allowing |
| 3200 | * a ring fd as it doesn't support regular read/write anyway. |
| 3201 | */ |
| 3202 | if (ctx->user_files[i]->f_op == &io_uring_fops) { |
| 3203 | fput(ctx->user_files[i]); |
| 3204 | break; |
| 3205 | } |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3206 | ret = 0; |
| 3207 | } |
| 3208 | |
| 3209 | if (ret) { |
| 3210 | for (i = 0; i < ctx->nr_user_files; i++) |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame^] | 3211 | if (ctx->user_files[i]) |
| 3212 | fput(ctx->user_files[i]); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3213 | |
| 3214 | kfree(ctx->user_files); |
Jens Axboe | 25adf50 | 2019-04-03 09:52:40 -0600 | [diff] [blame] | 3215 | ctx->user_files = NULL; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3216 | ctx->nr_user_files = 0; |
| 3217 | return ret; |
| 3218 | } |
| 3219 | |
| 3220 | ret = io_sqe_files_scm(ctx); |
| 3221 | if (ret) |
| 3222 | io_sqe_files_unregister(ctx); |
| 3223 | |
| 3224 | return ret; |
| 3225 | } |
| 3226 | |
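/*
 * Set up the submission offload machinery: take a reference on the
 * submitter's mm, spawn the SQPOLL thread if requested (optionally pinned
 * to a CPU), and create the two workqueues used for punted requests and
 * for bounded buffered writes.
 */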
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3227 | static int io_sq_offload_start(struct io_ring_ctx *ctx, |
| 3228 | struct io_uring_params *p) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3229 | { |
| 3230 | int ret; |
| 3231 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3232 | init_waitqueue_head(&ctx->sqo_wait); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3233 | mmgrab(current->mm); |
| 3234 | ctx->sqo_mm = current->mm; |
| 3235 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3236 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
Jens Axboe | 3ec482d | 2019-04-08 10:51:01 -0600 | [diff] [blame] | 3237 | ret = -EPERM; |
| 3238 | if (!capable(CAP_SYS_ADMIN)) |
| 3239 | goto err; |
| 3240 | |
Jens Axboe | 917257d | 2019-04-13 09:28:55 -0600 | [diff] [blame] | 3241 | ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); |
| 3242 | if (!ctx->sq_thread_idle) |
| 3243 | ctx->sq_thread_idle = HZ; |
| 3244 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3245 | if (p->flags & IORING_SETUP_SQ_AFF) { |
Jens Axboe | 44a9bd1 | 2019-05-14 20:00:30 -0600 | [diff] [blame] | 3246 | int cpu = p->sq_thread_cpu; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3247 | |
Jens Axboe | 917257d | 2019-04-13 09:28:55 -0600 | [diff] [blame] | 3248 | ret = -EINVAL; |
Jens Axboe | 44a9bd1 | 2019-05-14 20:00:30 -0600 | [diff] [blame] | 3249 | if (cpu >= nr_cpu_ids) |
| 3250 | goto err; |
Shenghui Wang | 7889f44 | 2019-05-07 16:03:19 +0800 | [diff] [blame] | 3251 | if (!cpu_online(cpu)) |
Jens Axboe | 917257d | 2019-04-13 09:28:55 -0600 | [diff] [blame] | 3252 | goto err; |
| 3253 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3254 | ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread, |
| 3255 | ctx, cpu, |
| 3256 | "io_uring-sq"); |
| 3257 | } else { |
| 3258 | ctx->sqo_thread = kthread_create(io_sq_thread, ctx, |
| 3259 | "io_uring-sq"); |
| 3260 | } |
| 3261 | if (IS_ERR(ctx->sqo_thread)) { |
| 3262 | ret = PTR_ERR(ctx->sqo_thread); |
| 3263 | ctx->sqo_thread = NULL; |
| 3264 | goto err; |
| 3265 | } |
| 3266 | wake_up_process(ctx->sqo_thread); |
| 3267 | } else if (p->flags & IORING_SETUP_SQ_AFF) { |
| 3268 | /* Can't have SQ_AFF without SQPOLL */ |
| 3269 | ret = -EINVAL; |
| 3270 | goto err; |
| 3271 | } |
| 3272 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3273 | /* Do QD, or 2 * CPUS, whatever is smallest */ |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 3274 | ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq", |
| 3275 | WQ_UNBOUND | WQ_FREEZABLE, |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3276 | min(ctx->sq_entries - 1, 2 * num_online_cpus())); |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 3277 | if (!ctx->sqo_wq[0]) { |
| 3278 | ret = -ENOMEM; |
| 3279 | goto err; |
| 3280 | } |
| 3281 | |
| 3282 | /* |
| 3283 | * This is for buffered writes, where we want to limit the parallelism |
| 3284 | * due to file locking in file systems. As "normal" buffered writes |
| 3285 | 	 * should parallelize on writeout quite nicely, limit us to having 2
| 3286 | * pending. This avoids massive contention on the inode when doing |
| 3287 | * buffered async writes. |
| 3288 | */ |
| 3289 | ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq", |
| 3290 | WQ_UNBOUND | WQ_FREEZABLE, 2); |
| 3291 | if (!ctx->sqo_wq[1]) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3292 | ret = -ENOMEM; |
| 3293 | goto err; |
| 3294 | } |
| 3295 | |
| 3296 | return 0; |
| 3297 | err: |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 3298 | io_finish_async(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3299 | mmdrop(ctx->sqo_mm); |
| 3300 | ctx->sqo_mm = NULL; |
| 3301 | return ret; |
| 3302 | } |
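/*
 * Illustrative userspace sketch (not kernel code): requesting the SQPOLL
 * offload that io_sq_offload_start() sets up above. Assumes a kernel and
 * libc that expose __NR_io_uring_setup and <linux/io_uring.h>; the task
 * needs CAP_SYS_ADMIN for IORING_SETUP_SQPOLL, and the pinned CPU must be
 * valid and online, matching the kernel-side checks.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static int setup_sqpoll_ring(unsigned entries, int cpu)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
	p.sq_thread_cpu = cpu;		/* checked against nr_cpu_ids/cpu_online() */
	p.sq_thread_idle = 2000;	/* ms of idle before the SQ thread sleeps */

	fd = syscall(__NR_io_uring_setup, entries, &p);	/* no glibc wrapper */
	if (fd < 0)
		perror("io_uring_setup");
	return fd;
}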
| 3303 | |
| 3304 | static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages) |
| 3305 | { |
| 3306 | atomic_long_sub(nr_pages, &user->locked_vm); |
| 3307 | } |
| 3308 | |
| 3309 | static int io_account_mem(struct user_struct *user, unsigned long nr_pages) |
| 3310 | { |
| 3311 | unsigned long page_limit, cur_pages, new_pages; |
| 3312 | |
| 3313 | /* Don't allow more pages than we can safely lock */ |
| 3314 | page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
| 3315 | |
| 3316 | do { |
| 3317 | cur_pages = atomic_long_read(&user->locked_vm); |
| 3318 | new_pages = cur_pages + nr_pages; |
| 3319 | if (new_pages > page_limit) |
| 3320 | return -ENOMEM; |
| 3321 | } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages, |
| 3322 | new_pages) != cur_pages); |
| 3323 | |
| 3324 | return 0; |
| 3325 | } |
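/*
 * Illustrative userspace sketch: pages pinned for the rings and for
 * registered buffers are charged against RLIMIT_MEMLOCK by io_account_mem()
 * above (unless the task holds CAP_IPC_LOCK), so applications commonly
 * verify the limit before registering large buffers.
 */
#include <sys/resource.h>
#include <stdio.h>

static int memlock_allows(unsigned long needed_bytes)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
		return 0;
	if (rl.rlim_cur != RLIM_INFINITY && rl.rlim_cur < needed_bytes) {
		fprintf(stderr, "RLIMIT_MEMLOCK too low: have %llu, need %lu\n",
			(unsigned long long)rl.rlim_cur, needed_bytes);
		return 0;
	}
	return 1;
}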
| 3326 | |
| 3327 | static void io_mem_free(void *ptr) |
| 3328 | { |
Mark Rutland | 52e04ef | 2019-04-30 17:30:21 +0100 | [diff] [blame] | 3329 | struct page *page; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3330 | |
Mark Rutland | 52e04ef | 2019-04-30 17:30:21 +0100 | [diff] [blame] | 3331 | if (!ptr) |
| 3332 | return; |
| 3333 | |
| 3334 | page = virt_to_head_page(ptr); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3335 | if (put_page_testzero(page)) |
| 3336 | free_compound_page(page); |
| 3337 | } |
| 3338 | |
| 3339 | static void *io_mem_alloc(size_t size) |
| 3340 | { |
| 3341 | gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP | |
| 3342 | __GFP_NORETRY; |
| 3343 | |
| 3344 | return (void *) __get_free_pages(gfp_flags, get_order(size)); |
| 3345 | } |
| 3346 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3347 | static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, |
| 3348 | size_t *sq_offset) |
| 3349 | { |
| 3350 | struct io_rings *rings; |
| 3351 | size_t off, sq_array_size; |
| 3352 | |
| 3353 | off = struct_size(rings, cqes, cq_entries); |
| 3354 | if (off == SIZE_MAX) |
| 3355 | return SIZE_MAX; |
| 3356 | |
| 3357 | #ifdef CONFIG_SMP |
| 3358 | off = ALIGN(off, SMP_CACHE_BYTES); |
| 3359 | if (off == 0) |
| 3360 | return SIZE_MAX; |
| 3361 | #endif |
| 3362 | |
| 3363 | sq_array_size = array_size(sizeof(u32), sq_entries); |
| 3364 | if (sq_array_size == SIZE_MAX) |
| 3365 | return SIZE_MAX; |
| 3366 | |
| 3367 | if (check_add_overflow(off, sq_array_size, &off)) |
| 3368 | return SIZE_MAX; |
| 3369 | |
| 3370 | if (sq_offset) |
| 3371 | *sq_offset = off; |
| 3372 | |
| 3373 | return off; |
| 3374 | } |
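/*
 * Worked example (sketch) of the layout rings_size() computes: the CQE
 * array directly follows the shared io_rings header, and the SQ index
 * array of u32 entries starts at the returned *sq_offset, cache-line
 * aligned. The header size used below is a placeholder, not a stable
 * value; only the 16-byte struct io_uring_cqe size is UAPI.
 */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t hdr = 64;			/* placeholder for sizeof(struct io_rings) */
	size_t cq_entries = 256, sq_entries = 128;
	size_t off = hdr + cq_entries * 16;	/* struct_size(rings, cqes, cq_entries) */

	off = (off + 63) & ~(size_t)63;		/* ALIGN(off, SMP_CACHE_BYTES), assuming 64 */
	printf("sq_array offset %zu, total %zu bytes\n",
	       off, off + sq_entries * sizeof(unsigned int));
	return 0;
}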
| 3375 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3376 | static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries) |
| 3377 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3378 | size_t pages; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3379 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3380 | pages = (size_t)1 << get_order( |
| 3381 | rings_size(sq_entries, cq_entries, NULL)); |
| 3382 | pages += (size_t)1 << get_order( |
| 3383 | array_size(sizeof(struct io_uring_sqe), sq_entries)); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3384 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3385 | return pages; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3386 | } |
| 3387 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3388 | static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx) |
| 3389 | { |
| 3390 | int i, j; |
| 3391 | |
| 3392 | if (!ctx->user_bufs) |
| 3393 | return -ENXIO; |
| 3394 | |
| 3395 | for (i = 0; i < ctx->nr_user_bufs; i++) { |
| 3396 | struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; |
| 3397 | |
| 3398 | for (j = 0; j < imu->nr_bvecs; j++) |
John Hubbard | 27c4d3a | 2019-08-04 19:32:06 -0700 | [diff] [blame] | 3399 | put_user_page(imu->bvec[j].bv_page); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3400 | |
| 3401 | if (ctx->account_mem) |
| 3402 | io_unaccount_mem(ctx->user, imu->nr_bvecs); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 3403 | kvfree(imu->bvec); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3404 | imu->nr_bvecs = 0; |
| 3405 | } |
| 3406 | |
| 3407 | kfree(ctx->user_bufs); |
| 3408 | ctx->user_bufs = NULL; |
| 3409 | ctx->nr_user_bufs = 0; |
| 3410 | return 0; |
| 3411 | } |
| 3412 | |
| 3413 | static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst, |
| 3414 | void __user *arg, unsigned index) |
| 3415 | { |
| 3416 | struct iovec __user *src; |
| 3417 | |
| 3418 | #ifdef CONFIG_COMPAT |
| 3419 | if (ctx->compat) { |
| 3420 | struct compat_iovec __user *ciovs; |
| 3421 | struct compat_iovec ciov; |
| 3422 | |
| 3423 | ciovs = (struct compat_iovec __user *) arg; |
| 3424 | if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov))) |
| 3425 | return -EFAULT; |
| 3426 | |
| 3427 | dst->iov_base = (void __user *) (unsigned long) ciov.iov_base; |
| 3428 | dst->iov_len = ciov.iov_len; |
| 3429 | return 0; |
| 3430 | } |
| 3431 | #endif |
| 3432 | src = (struct iovec __user *) arg; |
| 3433 | if (copy_from_user(dst, &src[index], sizeof(*dst))) |
| 3434 | return -EFAULT; |
| 3435 | return 0; |
| 3436 | } |
| 3437 | |
| 3438 | static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, |
| 3439 | unsigned nr_args) |
| 3440 | { |
| 3441 | struct vm_area_struct **vmas = NULL; |
| 3442 | struct page **pages = NULL; |
| 3443 | int i, j, got_pages = 0; |
| 3444 | int ret = -EINVAL; |
| 3445 | |
| 3446 | if (ctx->user_bufs) |
| 3447 | return -EBUSY; |
| 3448 | if (!nr_args || nr_args > UIO_MAXIOV) |
| 3449 | return -EINVAL; |
| 3450 | |
| 3451 | ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf), |
| 3452 | GFP_KERNEL); |
| 3453 | if (!ctx->user_bufs) |
| 3454 | return -ENOMEM; |
| 3455 | |
| 3456 | for (i = 0; i < nr_args; i++) { |
| 3457 | struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; |
| 3458 | unsigned long off, start, end, ubuf; |
| 3459 | int pret, nr_pages; |
| 3460 | struct iovec iov; |
| 3461 | size_t size; |
| 3462 | |
| 3463 | ret = io_copy_iov(ctx, &iov, arg, i); |
| 3464 | if (ret) |
Pavel Begunkov | a278682 | 2019-05-26 12:35:47 +0300 | [diff] [blame] | 3465 | goto err; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3466 | |
| 3467 | /* |
| 3468 | * Don't impose further limits on the size and buffer |
| 3469 | * constraints here, we'll -EINVAL later when IO is |
| 3470 | * submitted if they are wrong. |
| 3471 | */ |
| 3472 | ret = -EFAULT; |
| 3473 | if (!iov.iov_base || !iov.iov_len) |
| 3474 | goto err; |
| 3475 | |
| 3476 | /* arbitrary limit, but we need something */ |
| 3477 | if (iov.iov_len > SZ_1G) |
| 3478 | goto err; |
| 3479 | |
| 3480 | ubuf = (unsigned long) iov.iov_base; |
| 3481 | end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 3482 | start = ubuf >> PAGE_SHIFT; |
| 3483 | nr_pages = end - start; |
| 3484 | |
| 3485 | if (ctx->account_mem) { |
| 3486 | ret = io_account_mem(ctx->user, nr_pages); |
| 3487 | if (ret) |
| 3488 | goto err; |
| 3489 | } |
| 3490 | |
| 3491 | ret = 0; |
| 3492 | if (!pages || nr_pages > got_pages) { |
| 3493 | kfree(vmas); |
| 3494 | kfree(pages); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 3495 | pages = kvmalloc_array(nr_pages, sizeof(struct page *), |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3496 | GFP_KERNEL); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 3497 | vmas = kvmalloc_array(nr_pages, |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3498 | sizeof(struct vm_area_struct *), |
| 3499 | GFP_KERNEL); |
| 3500 | if (!pages || !vmas) { |
| 3501 | ret = -ENOMEM; |
| 3502 | if (ctx->account_mem) |
| 3503 | io_unaccount_mem(ctx->user, nr_pages); |
| 3504 | goto err; |
| 3505 | } |
| 3506 | got_pages = nr_pages; |
| 3507 | } |
| 3508 | |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 3509 | imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec), |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3510 | GFP_KERNEL); |
| 3511 | ret = -ENOMEM; |
| 3512 | if (!imu->bvec) { |
| 3513 | if (ctx->account_mem) |
| 3514 | io_unaccount_mem(ctx->user, nr_pages); |
| 3515 | goto err; |
| 3516 | } |
| 3517 | |
| 3518 | ret = 0; |
| 3519 | down_read(¤t->mm->mmap_sem); |
Ira Weiny | 932f4a6 | 2019-05-13 17:17:03 -0700 | [diff] [blame] | 3520 | pret = get_user_pages(ubuf, nr_pages, |
| 3521 | FOLL_WRITE | FOLL_LONGTERM, |
| 3522 | pages, vmas); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3523 | if (pret == nr_pages) { |
| 3524 | /* don't support file backed memory */ |
| 3525 | for (j = 0; j < nr_pages; j++) { |
| 3526 | struct vm_area_struct *vma = vmas[j]; |
| 3527 | |
| 3528 | if (vma->vm_file && |
| 3529 | !is_file_hugepages(vma->vm_file)) { |
| 3530 | ret = -EOPNOTSUPP; |
| 3531 | break; |
| 3532 | } |
| 3533 | } |
| 3534 | } else { |
| 3535 | ret = pret < 0 ? pret : -EFAULT; |
| 3536 | } |
| 3537 | up_read(¤t->mm->mmap_sem); |
| 3538 | if (ret) { |
| 3539 | /* |
| 3540 | * if we did partial map, or found file backed vmas, |
| 3541 | * release any pages we did get |
| 3542 | */ |
John Hubbard | 27c4d3a | 2019-08-04 19:32:06 -0700 | [diff] [blame] | 3543 | if (pret > 0) |
| 3544 | put_user_pages(pages, pret); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3545 | if (ctx->account_mem) |
| 3546 | io_unaccount_mem(ctx->user, nr_pages); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 3547 | kvfree(imu->bvec); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3548 | goto err; |
| 3549 | } |
| 3550 | |
| 3551 | off = ubuf & ~PAGE_MASK; |
| 3552 | size = iov.iov_len; |
| 3553 | for (j = 0; j < nr_pages; j++) { |
| 3554 | size_t vec_len; |
| 3555 | |
| 3556 | vec_len = min_t(size_t, size, PAGE_SIZE - off); |
| 3557 | imu->bvec[j].bv_page = pages[j]; |
| 3558 | imu->bvec[j].bv_len = vec_len; |
| 3559 | imu->bvec[j].bv_offset = off; |
| 3560 | off = 0; |
| 3561 | size -= vec_len; |
| 3562 | } |
| 3563 | /* store original address for later verification */ |
| 3564 | imu->ubuf = ubuf; |
| 3565 | imu->len = iov.iov_len; |
| 3566 | imu->nr_bvecs = nr_pages; |
| 3567 | |
| 3568 | ctx->nr_user_bufs++; |
| 3569 | } |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 3570 | kvfree(pages); |
| 3571 | kvfree(vmas); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3572 | return 0; |
| 3573 | err: |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 3574 | kvfree(pages); |
| 3575 | kvfree(vmas); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3576 | io_sqe_buffer_unregister(ctx); |
| 3577 | return ret; |
| 3578 | } |
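/*
 * Illustrative userspace sketch: registering a fixed buffer so its pages are
 * pinned once by io_sqe_buffer_register() above instead of on every I/O.
 * Constraints mirrored from the kernel side: non-zero base and length, at
 * most 1 GiB per iovec, and anonymous (or hugetlbfs) memory only.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

static int register_fixed_buffer(int ring_fd, void *buf, size_t len)
{
	struct iovec iov = {
		.iov_base = buf,	/* must be non-NULL */
		.iov_len  = len,	/* must be non-zero and <= 1 GiB */
	};

	/* no glibc wrapper for io_uring_register at this point */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, &iov, 1);
}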
| 3579 | |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 3580 | static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg) |
| 3581 | { |
| 3582 | __s32 __user *fds = arg; |
| 3583 | int fd; |
| 3584 | |
| 3585 | if (ctx->cq_ev_fd) |
| 3586 | return -EBUSY; |
| 3587 | |
| 3588 | if (copy_from_user(&fd, fds, sizeof(*fds))) |
| 3589 | return -EFAULT; |
| 3590 | |
| 3591 | ctx->cq_ev_fd = eventfd_ctx_fdget(fd); |
| 3592 | if (IS_ERR(ctx->cq_ev_fd)) { |
| 3593 | int ret = PTR_ERR(ctx->cq_ev_fd); |
| 3594 | ctx->cq_ev_fd = NULL; |
| 3595 | return ret; |
| 3596 | } |
| 3597 | |
| 3598 | return 0; |
| 3599 | } |
| 3600 | |
| 3601 | static int io_eventfd_unregister(struct io_ring_ctx *ctx) |
| 3602 | { |
| 3603 | if (ctx->cq_ev_fd) { |
| 3604 | eventfd_ctx_put(ctx->cq_ev_fd); |
| 3605 | ctx->cq_ev_fd = NULL; |
| 3606 | return 0; |
| 3607 | } |
| 3608 | |
| 3609 | return -ENXIO; |
| 3610 | } |
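/*
 * Illustrative userspace sketch: tying an eventfd to the ring so completions
 * can be picked up from an epoll/select loop; this is what
 * io_eventfd_register() above consumes (nr_args must be 1).
 */
#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	if (efd < 0)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
		close(efd);
		return -1;
	}
	return efd;	/* becomes readable whenever new CQEs are posted */
}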
| 3611 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3612 | static void io_ring_ctx_free(struct io_ring_ctx *ctx) |
| 3613 | { |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3614 | io_finish_async(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3615 | if (ctx->sqo_mm) |
| 3616 | mmdrop(ctx->sqo_mm); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3617 | |
| 3618 | io_iopoll_reap_events(ctx); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3619 | io_sqe_buffer_unregister(ctx); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3620 | io_sqe_files_unregister(ctx); |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 3621 | io_eventfd_unregister(ctx); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3622 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3623 | #if defined(CONFIG_UNIX) |
Eric Biggers | 355e8d2 | 2019-06-12 14:58:43 -0700 | [diff] [blame] | 3624 | if (ctx->ring_sock) { |
| 3625 | ctx->ring_sock->file = NULL; /* so that iput() is called */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3626 | sock_release(ctx->ring_sock); |
Eric Biggers | 355e8d2 | 2019-06-12 14:58:43 -0700 | [diff] [blame] | 3627 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3628 | #endif |
| 3629 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3630 | io_mem_free(ctx->rings); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3631 | io_mem_free(ctx->sq_sqes); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3632 | |
| 3633 | percpu_ref_exit(&ctx->refs); |
| 3634 | if (ctx->account_mem) |
| 3635 | io_unaccount_mem(ctx->user, |
| 3636 | ring_pages(ctx->sq_entries, ctx->cq_entries)); |
| 3637 | free_uid(ctx->user); |
| 3638 | kfree(ctx); |
| 3639 | } |
| 3640 | |
| 3641 | static __poll_t io_uring_poll(struct file *file, poll_table *wait) |
| 3642 | { |
| 3643 | struct io_ring_ctx *ctx = file->private_data; |
| 3644 | __poll_t mask = 0; |
| 3645 | |
| 3646 | poll_wait(file, &ctx->cq_wait, wait); |
Stefan Bühler | 4f7067c | 2019-04-24 23:54:17 +0200 | [diff] [blame] | 3647 | /* |
| 3648 | * synchronizes with barrier from wq_has_sleeper call in |
| 3649 | * io_commit_cqring |
| 3650 | */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3651 | smp_rmb(); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3652 | if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head != |
| 3653 | ctx->rings->sq_ring_entries) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3654 | mask |= EPOLLOUT | EPOLLWRNORM; |
yangerkun | daa5de5 | 2019-09-24 20:53:34 +0800 | [diff] [blame] | 3655 | if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3656 | mask |= EPOLLIN | EPOLLRDNORM; |
| 3657 | |
| 3658 | return mask; |
| 3659 | } |
| 3660 | |
| 3661 | static int io_uring_fasync(int fd, struct file *file, int on) |
| 3662 | { |
| 3663 | struct io_ring_ctx *ctx = file->private_data; |
| 3664 | |
| 3665 | return fasync_helper(fd, file, on, &ctx->cq_fasync); |
| 3666 | } |
| 3667 | |
| 3668 | static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) |
| 3669 | { |
| 3670 | mutex_lock(&ctx->uring_lock); |
| 3671 | percpu_ref_kill(&ctx->refs); |
| 3672 | mutex_unlock(&ctx->uring_lock); |
| 3673 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 3674 | io_kill_timeouts(ctx); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3675 | io_poll_remove_all(ctx); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3676 | io_iopoll_reap_events(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3677 | wait_for_completion(&ctx->ctx_done); |
| 3678 | io_ring_ctx_free(ctx); |
| 3679 | } |
| 3680 | |
| 3681 | static int io_uring_release(struct inode *inode, struct file *file) |
| 3682 | { |
| 3683 | struct io_ring_ctx *ctx = file->private_data; |
| 3684 | |
| 3685 | file->private_data = NULL; |
| 3686 | io_ring_ctx_wait_and_kill(ctx); |
| 3687 | return 0; |
| 3688 | } |
| 3689 | |
| 3690 | static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) |
| 3691 | { |
| 3692 | loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT; |
| 3693 | unsigned long sz = vma->vm_end - vma->vm_start; |
| 3694 | struct io_ring_ctx *ctx = file->private_data; |
| 3695 | unsigned long pfn; |
| 3696 | struct page *page; |
| 3697 | void *ptr; |
| 3698 | |
| 3699 | switch (offset) { |
| 3700 | case IORING_OFF_SQ_RING: |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3701 | case IORING_OFF_CQ_RING: |
| 3702 | ptr = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3703 | break; |
| 3704 | case IORING_OFF_SQES: |
| 3705 | ptr = ctx->sq_sqes; |
| 3706 | break; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3707 | default: |
| 3708 | return -EINVAL; |
| 3709 | } |
| 3710 | |
| 3711 | page = virt_to_head_page(ptr); |
Matthew Wilcox (Oracle) | a50b854 | 2019-09-23 15:34:25 -0700 | [diff] [blame] | 3712 | if (sz > page_size(page)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3713 | return -EINVAL; |
| 3714 | |
| 3715 | pfn = virt_to_phys(ptr) >> PAGE_SHIFT; |
| 3716 | return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); |
| 3717 | } |
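/*
 * Illustrative userspace sketch: mapping the shared rings through the fixed
 * offsets handled by io_uring_mmap() above. With IORING_FEAT_SINGLE_MMAP
 * (advertised by this kernel) the SQ and CQ ring share one mapping; the SQE
 * array is always a separate mapping at IORING_OFF_SQES.
 */
#include <linux/io_uring.h>
#include <sys/mman.h>
#include <stddef.h>

static void *map_rings(int ring_fd, struct io_uring_params *p, void **sqes)
{
	size_t ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
	void *rings;

	if (cq_sz > ring_sz)
		ring_sz = cq_sz;
	rings = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
		     MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	if (rings == MAP_FAILED)
		return NULL;

	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		     ring_fd, IORING_OFF_SQES);
	return *sqes == MAP_FAILED ? NULL : rings;
}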
| 3718 | |
| 3719 | SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, |
| 3720 | u32, min_complete, u32, flags, const sigset_t __user *, sig, |
| 3721 | size_t, sigsz) |
| 3722 | { |
| 3723 | struct io_ring_ctx *ctx; |
| 3724 | long ret = -EBADF; |
| 3725 | int submitted = 0; |
| 3726 | struct fd f; |
| 3727 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3728 | if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3729 | return -EINVAL; |
| 3730 | |
| 3731 | f = fdget(fd); |
| 3732 | if (!f.file) |
| 3733 | return -EBADF; |
| 3734 | |
| 3735 | ret = -EOPNOTSUPP; |
| 3736 | if (f.file->f_op != &io_uring_fops) |
| 3737 | goto out_fput; |
| 3738 | |
| 3739 | ret = -ENXIO; |
| 3740 | ctx = f.file->private_data; |
| 3741 | if (!percpu_ref_tryget(&ctx->refs)) |
| 3742 | goto out_fput; |
| 3743 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3744 | /* |
| 3745 | * For SQ polling, the thread will do all submissions and completions. |
| 3746 | * Just return the requested submit count, and wake the thread if |
| 3747 | * we were asked to. |
| 3748 | */ |
Jens Axboe | b2a9ead | 2019-09-12 14:19:16 -0600 | [diff] [blame] | 3749 | ret = 0; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3750 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
| 3751 | if (flags & IORING_ENTER_SQ_WAKEUP) |
| 3752 | wake_up(&ctx->sqo_wait); |
| 3753 | submitted = to_submit; |
Jens Axboe | b2a9ead | 2019-09-12 14:19:16 -0600 | [diff] [blame] | 3754 | } else if (to_submit) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3755 | to_submit = min(to_submit, ctx->sq_entries); |
| 3756 | |
| 3757 | mutex_lock(&ctx->uring_lock); |
Jens Axboe | bc808bc | 2019-10-22 13:14:37 -0600 | [diff] [blame] | 3758 | submitted = io_ring_submit(ctx, to_submit); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3759 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3760 | } |
| 3761 | if (flags & IORING_ENTER_GETEVENTS) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3762 | unsigned nr_events = 0; |
| 3763 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3764 | min_complete = min(min_complete, ctx->cq_entries); |
| 3765 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3766 | if (ctx->flags & IORING_SETUP_IOPOLL) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3767 | ret = io_iopoll_check(ctx, &nr_events, min_complete); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3768 | } else { |
| 3769 | ret = io_cqring_wait(ctx, min_complete, sig, sigsz); |
| 3770 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3771 | } |
| 3772 | |
Pavel Begunkov | 6805b32 | 2019-10-08 02:18:42 +0300 | [diff] [blame] | 3773 | percpu_ref_put(&ctx->refs); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3774 | out_fput: |
| 3775 | fdput(f); |
| 3776 | return submitted ? submitted : ret; |
| 3777 | } |
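/*
 * Illustrative userspace sketch: driving the syscall above. For a normal
 * ring this submits to_submit SQEs and optionally waits for completions;
 * for an SQPOLL ring the application only needs to pass
 * IORING_ENTER_SQ_WAKEUP when the SQ thread has flagged itself asleep.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int submit_and_wait(int ring_fd, unsigned to_submit, unsigned wait_for)
{
	unsigned flags = wait_for ? IORING_ENTER_GETEVENTS : 0;

	/* the last two arguments are the optional sigset_t and its size */
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, wait_for,
		       flags, NULL, 0);
}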
| 3778 | |
| 3779 | static const struct file_operations io_uring_fops = { |
| 3780 | .release = io_uring_release, |
| 3781 | .mmap = io_uring_mmap, |
| 3782 | .poll = io_uring_poll, |
| 3783 | .fasync = io_uring_fasync, |
| 3784 | }; |
| 3785 | |
| 3786 | static int io_allocate_scq_urings(struct io_ring_ctx *ctx, |
| 3787 | struct io_uring_params *p) |
| 3788 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3789 | struct io_rings *rings; |
| 3790 | size_t size, sq_array_offset; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3791 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3792 | size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset); |
| 3793 | if (size == SIZE_MAX) |
| 3794 | return -EOVERFLOW; |
| 3795 | |
| 3796 | rings = io_mem_alloc(size); |
| 3797 | if (!rings) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3798 | return -ENOMEM; |
| 3799 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3800 | ctx->rings = rings; |
| 3801 | ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); |
| 3802 | rings->sq_ring_mask = p->sq_entries - 1; |
| 3803 | rings->cq_ring_mask = p->cq_entries - 1; |
| 3804 | rings->sq_ring_entries = p->sq_entries; |
| 3805 | rings->cq_ring_entries = p->cq_entries; |
| 3806 | ctx->sq_mask = rings->sq_ring_mask; |
| 3807 | ctx->cq_mask = rings->cq_ring_mask; |
| 3808 | ctx->sq_entries = rings->sq_ring_entries; |
| 3809 | ctx->cq_entries = rings->cq_ring_entries; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3810 | |
| 3811 | size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); |
| 3812 | if (size == SIZE_MAX) |
| 3813 | return -EOVERFLOW; |
| 3814 | |
| 3815 | ctx->sq_sqes = io_mem_alloc(size); |
Mark Rutland | 52e04ef | 2019-04-30 17:30:21 +0100 | [diff] [blame] | 3816 | if (!ctx->sq_sqes) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3817 | return -ENOMEM; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3818 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3819 | return 0; |
| 3820 | } |
| 3821 | |
| 3822 | /* |
| 3823 |  * Allocate an anonymous fd; this is what constitutes the application |
| 3824 | * visible backing of an io_uring instance. The application mmaps this |
| 3825 | * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled, |
| 3826 | * we have to tie this fd to a socket for file garbage collection purposes. |
| 3827 | */ |
| 3828 | static int io_uring_get_fd(struct io_ring_ctx *ctx) |
| 3829 | { |
| 3830 | struct file *file; |
| 3831 | int ret; |
| 3832 | |
| 3833 | #if defined(CONFIG_UNIX) |
| 3834 | ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP, |
| 3835 | &ctx->ring_sock); |
| 3836 | if (ret) |
| 3837 | return ret; |
| 3838 | #endif |
| 3839 | |
| 3840 | ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC); |
| 3841 | if (ret < 0) |
| 3842 | goto err; |
| 3843 | |
| 3844 | file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx, |
| 3845 | O_RDWR | O_CLOEXEC); |
| 3846 | if (IS_ERR(file)) { |
| 3847 | put_unused_fd(ret); |
| 3848 | ret = PTR_ERR(file); |
| 3849 | goto err; |
| 3850 | } |
| 3851 | |
| 3852 | #if defined(CONFIG_UNIX) |
| 3853 | ctx->ring_sock->file = file; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 3854 | ctx->ring_sock->sk->sk_user_data = ctx; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3855 | #endif |
| 3856 | fd_install(ret, file); |
| 3857 | return ret; |
| 3858 | err: |
| 3859 | #if defined(CONFIG_UNIX) |
| 3860 | sock_release(ctx->ring_sock); |
| 3861 | ctx->ring_sock = NULL; |
| 3862 | #endif |
| 3863 | return ret; |
| 3864 | } |
| 3865 | |
| 3866 | static int io_uring_create(unsigned entries, struct io_uring_params *p) |
| 3867 | { |
| 3868 | struct user_struct *user = NULL; |
| 3869 | struct io_ring_ctx *ctx; |
| 3870 | bool account_mem; |
| 3871 | int ret; |
| 3872 | |
| 3873 | if (!entries || entries > IORING_MAX_ENTRIES) |
| 3874 | return -EINVAL; |
| 3875 | |
| 3876 | /* |
| 3877 | * Use twice as many entries for the CQ ring. It's possible for the |
| 3878 | * application to drive a higher depth than the size of the SQ ring, |
| 3879 | * since the sqes are only used at submission time. This allows for |
| 3880 | * some flexibility in overcommitting a bit. |
| 3881 | */ |
| 3882 | p->sq_entries = roundup_pow_of_two(entries); |
| 3883 | p->cq_entries = 2 * p->sq_entries; |
| 3884 | |
| 3885 | user = get_uid(current_user()); |
| 3886 | account_mem = !capable(CAP_IPC_LOCK); |
| 3887 | |
| 3888 | if (account_mem) { |
| 3889 | ret = io_account_mem(user, |
| 3890 | ring_pages(p->sq_entries, p->cq_entries)); |
| 3891 | if (ret) { |
| 3892 | free_uid(user); |
| 3893 | return ret; |
| 3894 | } |
| 3895 | } |
| 3896 | |
| 3897 | ctx = io_ring_ctx_alloc(p); |
| 3898 | if (!ctx) { |
| 3899 | if (account_mem) |
| 3900 | io_unaccount_mem(user, ring_pages(p->sq_entries, |
| 3901 | p->cq_entries)); |
| 3902 | free_uid(user); |
| 3903 | return -ENOMEM; |
| 3904 | } |
| 3905 | ctx->compat = in_compat_syscall(); |
| 3906 | ctx->account_mem = account_mem; |
| 3907 | ctx->user = user; |
| 3908 | |
| 3909 | ret = io_allocate_scq_urings(ctx, p); |
| 3910 | if (ret) |
| 3911 | goto err; |
| 3912 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3913 | ret = io_sq_offload_start(ctx, p); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3914 | if (ret) |
| 3915 | goto err; |
| 3916 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3917 | memset(&p->sq_off, 0, sizeof(p->sq_off)); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3918 | p->sq_off.head = offsetof(struct io_rings, sq.head); |
| 3919 | p->sq_off.tail = offsetof(struct io_rings, sq.tail); |
| 3920 | p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); |
| 3921 | p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); |
| 3922 | p->sq_off.flags = offsetof(struct io_rings, sq_flags); |
| 3923 | p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); |
| 3924 | p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3925 | |
| 3926 | memset(&p->cq_off, 0, sizeof(p->cq_off)); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3927 | p->cq_off.head = offsetof(struct io_rings, cq.head); |
| 3928 | p->cq_off.tail = offsetof(struct io_rings, cq.tail); |
| 3929 | p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); |
| 3930 | p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); |
| 3931 | p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); |
| 3932 | p->cq_off.cqes = offsetof(struct io_rings, cqes); |
Jens Axboe | ac90f24 | 2019-09-06 10:26:21 -0600 | [diff] [blame] | 3933 | |
Jens Axboe | 044c1ab | 2019-10-28 09:15:33 -0600 | [diff] [blame] | 3934 | /* |
| 3935 | * Install ring fd as the very last thing, so we don't risk someone |
| 3936 | * having closed it before we finish setup |
| 3937 | */ |
| 3938 | ret = io_uring_get_fd(ctx); |
| 3939 | if (ret < 0) |
| 3940 | goto err; |
| 3941 | |
Jens Axboe | ac90f24 | 2019-09-06 10:26:21 -0600 | [diff] [blame] | 3942 | p->features = IORING_FEAT_SINGLE_MMAP; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3943 | return ret; |
| 3944 | err: |
| 3945 | io_ring_ctx_wait_and_kill(ctx); |
| 3946 | return ret; |
| 3947 | } |
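/*
 * Illustrative userspace sketch: turning the sq_off/cq_off values filled in
 * by io_uring_create() above into pointers inside the mmap()ed ring region
 * (see the mapping sketch after io_uring_mmap()). Assumes the single-mmap
 * layout of this kernel, where SQ and CQ share one base address; only a
 * subset of the offset fields is shown.
 */
#include <linux/io_uring.h>

struct ring_ptrs {
	unsigned *sq_head, *sq_tail, *sq_array;
	unsigned *cq_head, *cq_tail;
	struct io_uring_cqe *cqes;
};

static void resolve_ring_ptrs(void *rings, struct io_uring_params *p,
			      struct ring_ptrs *r)
{
	char *base = rings;

	r->sq_head  = (unsigned *)(base + p->sq_off.head);
	r->sq_tail  = (unsigned *)(base + p->sq_off.tail);
	r->sq_array = (unsigned *)(base + p->sq_off.array);
	r->cq_head  = (unsigned *)(base + p->cq_off.head);
	r->cq_tail  = (unsigned *)(base + p->cq_off.tail);
	r->cqes     = (struct io_uring_cqe *)(base + p->cq_off.cqes);
}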
| 3948 | |
| 3949 | /* |
| 3950 |  * Sets up an io_uring context and returns the fd. The application asks for a |
| 3951 |  * ring size; we return the actual sq/cq ring sizes (among other things) in the |
| 3952 | * params structure passed in. |
| 3953 | */ |
| 3954 | static long io_uring_setup(u32 entries, struct io_uring_params __user *params) |
| 3955 | { |
| 3956 | struct io_uring_params p; |
| 3957 | long ret; |
| 3958 | int i; |
| 3959 | |
| 3960 | if (copy_from_user(&p, params, sizeof(p))) |
| 3961 | return -EFAULT; |
| 3962 | for (i = 0; i < ARRAY_SIZE(p.resv); i++) { |
| 3963 | if (p.resv[i]) |
| 3964 | return -EINVAL; |
| 3965 | } |
| 3966 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3967 | if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | |
| 3968 | IORING_SETUP_SQ_AFF)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3969 | return -EINVAL; |
| 3970 | |
| 3971 | ret = io_uring_create(entries, &p); |
| 3972 | if (ret < 0) |
| 3973 | return ret; |
| 3974 | |
| 3975 | if (copy_to_user(params, &p, sizeof(p))) |
| 3976 | return -EFAULT; |
| 3977 | |
| 3978 | return ret; |
| 3979 | } |
| 3980 | |
| 3981 | SYSCALL_DEFINE2(io_uring_setup, u32, entries, |
| 3982 | struct io_uring_params __user *, params) |
| 3983 | { |
| 3984 | return io_uring_setup(entries, params); |
| 3985 | } |
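/*
 * Illustrative userspace sketch: a thin wrapper around the syscall defined
 * above, checking the feature bit this kernel reports so the application
 * knows it can map the SQ and CQ rings with a single mmap().
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int io_uring_setup_wrapper(unsigned entries, struct io_uring_params *p,
				  int *single_mmap)
{
	int fd;

	memset(p, 0, sizeof(*p));
	fd = syscall(__NR_io_uring_setup, entries, p);
	if (fd >= 0 && single_mmap)
		*single_mmap = !!(p->features & IORING_FEAT_SINGLE_MMAP);
	return fd;
}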
| 3986 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3987 | static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, |
| 3988 | void __user *arg, unsigned nr_args) |
Jens Axboe | b19062a | 2019-04-15 10:49:38 -0600 | [diff] [blame] | 3989 | __releases(ctx->uring_lock) |
| 3990 | __acquires(ctx->uring_lock) |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 3991 | { |
| 3992 | int ret; |
| 3993 | |
Jens Axboe | 35fa71a | 2019-04-22 10:23:23 -0600 | [diff] [blame] | 3994 | /* |
| 3995 | * We're inside the ring mutex, if the ref is already dying, then |
| 3996 | * someone else killed the ctx or is already going through |
| 3997 | * io_uring_register(). |
| 3998 | */ |
| 3999 | if (percpu_ref_is_dying(&ctx->refs)) |
| 4000 | return -ENXIO; |
| 4001 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 4002 | percpu_ref_kill(&ctx->refs); |
Jens Axboe | b19062a | 2019-04-15 10:49:38 -0600 | [diff] [blame] | 4003 | |
| 4004 | /* |
| 4005 | * Drop uring mutex before waiting for references to exit. If another |
| 4006 | * thread is currently inside io_uring_enter() it might need to grab |
| 4007 | * the uring_lock to make progress. If we hold it here across the drain |
| 4008 | * wait, then we can deadlock. It's safe to drop the mutex here, since |
| 4009 | * no new references will come in after we've killed the percpu ref. |
| 4010 | */ |
| 4011 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 4012 | wait_for_completion(&ctx->ctx_done); |
Jens Axboe | b19062a | 2019-04-15 10:49:38 -0600 | [diff] [blame] | 4013 | mutex_lock(&ctx->uring_lock); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 4014 | |
| 4015 | switch (opcode) { |
| 4016 | case IORING_REGISTER_BUFFERS: |
| 4017 | ret = io_sqe_buffer_register(ctx, arg, nr_args); |
| 4018 | break; |
| 4019 | case IORING_UNREGISTER_BUFFERS: |
| 4020 | ret = -EINVAL; |
| 4021 | if (arg || nr_args) |
| 4022 | break; |
| 4023 | ret = io_sqe_buffer_unregister(ctx); |
| 4024 | break; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 4025 | case IORING_REGISTER_FILES: |
| 4026 | ret = io_sqe_files_register(ctx, arg, nr_args); |
| 4027 | break; |
| 4028 | case IORING_UNREGISTER_FILES: |
| 4029 | ret = -EINVAL; |
| 4030 | if (arg || nr_args) |
| 4031 | break; |
| 4032 | ret = io_sqe_files_unregister(ctx); |
| 4033 | break; |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 4034 | case IORING_REGISTER_EVENTFD: |
| 4035 | ret = -EINVAL; |
| 4036 | if (nr_args != 1) |
| 4037 | break; |
| 4038 | ret = io_eventfd_register(ctx, arg); |
| 4039 | break; |
| 4040 | case IORING_UNREGISTER_EVENTFD: |
| 4041 | ret = -EINVAL; |
| 4042 | if (arg || nr_args) |
| 4043 | break; |
| 4044 | ret = io_eventfd_unregister(ctx); |
| 4045 | break; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 4046 | default: |
| 4047 | ret = -EINVAL; |
| 4048 | break; |
| 4049 | } |
| 4050 | |
| 4051 | /* bring the ctx back to life */ |
| 4052 | reinit_completion(&ctx->ctx_done); |
| 4053 | percpu_ref_reinit(&ctx->refs); |
| 4054 | return ret; |
| 4055 | } |
| 4056 | |
| 4057 | SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, |
| 4058 | void __user *, arg, unsigned int, nr_args) |
| 4059 | { |
| 4060 | struct io_ring_ctx *ctx; |
| 4061 | long ret = -EBADF; |
| 4062 | struct fd f; |
| 4063 | |
| 4064 | f = fdget(fd); |
| 4065 | if (!f.file) |
| 4066 | return -EBADF; |
| 4067 | |
| 4068 | ret = -EOPNOTSUPP; |
| 4069 | if (f.file->f_op != &io_uring_fops) |
| 4070 | goto out_fput; |
| 4071 | |
| 4072 | ctx = f.file->private_data; |
| 4073 | |
| 4074 | mutex_lock(&ctx->uring_lock); |
| 4075 | ret = __io_uring_register(ctx, opcode, arg, nr_args); |
| 4076 | mutex_unlock(&ctx->uring_lock); |
| 4077 | out_fput: |
| 4078 | fdput(f); |
| 4079 | return ret; |
| 4080 | } |
| 4081 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4082 | static int __init io_uring_init(void) |
| 4083 | { |
| 4084 | req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC); |
| 4085 | return 0; |
| 4086 | }; |
| 4087 | __initcall(io_uring_init); |