blob: 9d8e703bc8510e9f36f688566172792daca56a67 [file] [log] [blame]
Jens Axboe2b188cc2019-01-07 10:46:33 -07001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
Stefan Bühler1e84b972019-04-24 23:54:16 +02007 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
Jens Axboe2b188cc2019-01-07 10:46:33 -070029 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
Christoph Hellwigc992fe22019-01-11 09:43:02 -070040 * Copyright (c) 2018-2019 Christoph Hellwig
Jens Axboe2b188cc2019-01-07 10:46:33 -070041 */
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <linux/refcount.h>
48#include <linux/uio.h>
49
50#include <linux/sched/signal.h>
51#include <linux/fs.h>
52#include <linux/file.h>
53#include <linux/fdtable.h>
54#include <linux/mm.h>
55#include <linux/mman.h>
56#include <linux/mmu_context.h>
57#include <linux/percpu.h>
58#include <linux/slab.h>
59#include <linux/workqueue.h>
Jens Axboe6c271ce2019-01-10 11:22:30 -070060#include <linux/kthread.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070061#include <linux/blkdev.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070062#include <linux/bvec.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070063#include <linux/net.h>
64#include <net/sock.h>
65#include <net/af_unix.h>
Jens Axboe6b063142019-01-10 22:13:58 -070066#include <net/scm.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070067#include <linux/anon_inodes.h>
68#include <linux/sched/mm.h>
69#include <linux/uaccess.h>
70#include <linux/nospec.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070071#include <linux/sizes.h>
72#include <linux/hugetlb.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070073
74#include <uapi/linux/io_uring.h>
75
76#include "internal.h"
77
Daniel Xu5277dea2019-09-14 14:23:45 -070078#define IORING_MAX_ENTRIES 32768
Jens Axboe6b063142019-01-10 22:13:58 -070079#define IORING_MAX_FIXED_FILES 1024
Jens Axboe2b188cc2019-01-07 10:46:33 -070080
81struct io_uring {
82 u32 head ____cacheline_aligned_in_smp;
83 u32 tail ____cacheline_aligned_in_smp;
84};
85
Stefan Bühler1e84b972019-04-24 23:54:16 +020086/*
Hristo Venev75b28af2019-08-26 17:23:46 +000087 * This data is shared with the application through the mmap at offsets
88 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
Stefan Bühler1e84b972019-04-24 23:54:16 +020089 *
90 * The offsets to the member fields are published through struct
91 * io_sqring_offsets when calling io_uring_setup.
92 */
Hristo Venev75b28af2019-08-26 17:23:46 +000093struct io_rings {
Stefan Bühler1e84b972019-04-24 23:54:16 +020094 /*
95 * Head and tail offsets into the ring; the offsets need to be
96 * masked to get valid indices.
97 *
Hristo Venev75b28af2019-08-26 17:23:46 +000098 * The kernel controls head of the sq ring and the tail of the cq ring,
99 * and the application controls tail of the sq ring and the head of the
100 * cq ring.
Stefan Bühler1e84b972019-04-24 23:54:16 +0200101 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000102 struct io_uring sq, cq;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200103 /*
Hristo Venev75b28af2019-08-26 17:23:46 +0000104 * Bitmasks to apply to head and tail offsets (constant, equals
Stefan Bühler1e84b972019-04-24 23:54:16 +0200105 * ring_entries - 1)
106 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000107 u32 sq_ring_mask, cq_ring_mask;
108 /* Ring sizes (constant, power of 2) */
109 u32 sq_ring_entries, cq_ring_entries;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200110 /*
111 * Number of invalid entries dropped by the kernel due to
112 * invalid index stored in array
113 *
114 * Written by the kernel, shouldn't be modified by the
115 * application (i.e. get number of "new events" by comparing to
116 * cached value).
117 *
118 * After a new SQ head value was read by the application this
119 * counter includes all submissions that were dropped reaching
120 * the new SQ head (and possibly more).
121 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000122 u32 sq_dropped;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200123 /*
124 * Runtime flags
125 *
126 * Written by the kernel, shouldn't be modified by the
127 * application.
128 *
129 * The application needs a full memory barrier before checking
130 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
131 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000132 u32 sq_flags;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200133 /*
134 * Number of completion events lost because the queue was full;
135 * this should be avoided by the application by making sure
136 * there are not more requests pending thatn there is space in
137 * the completion queue.
138 *
139 * Written by the kernel, shouldn't be modified by the
140 * application (i.e. get number of "new events" by comparing to
141 * cached value).
142 *
143 * As completion events come in out of order this counter is not
144 * ordered with any other data.
145 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000146 u32 cq_overflow;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200147 /*
148 * Ring buffer of completion events.
149 *
150 * The kernel writes completion events fresh every time they are
151 * produced, so the application is allowed to modify pending
152 * entries.
153 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000154 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700155};
156
Jens Axboeedafcce2019-01-09 09:16:05 -0700157struct io_mapped_ubuf {
158 u64 ubuf;
159 size_t len;
160 struct bio_vec *bvec;
161 unsigned int nr_bvecs;
162};
163
Jens Axboe31b51512019-01-18 22:56:34 -0700164struct async_list {
165 spinlock_t lock;
166 atomic_t cnt;
167 struct list_head list;
168
169 struct file *file;
Jens Axboe6d5d5ac2019-09-11 10:16:13 -0600170 off_t io_start;
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +0800171 size_t io_len;
Jens Axboe31b51512019-01-18 22:56:34 -0700172};
173
Jens Axboe2b188cc2019-01-07 10:46:33 -0700174struct io_ring_ctx {
175 struct {
176 struct percpu_ref refs;
177 } ____cacheline_aligned_in_smp;
178
179 struct {
180 unsigned int flags;
181 bool compat;
182 bool account_mem;
183
Hristo Venev75b28af2019-08-26 17:23:46 +0000184 /*
185 * Ring buffer of indices into array of io_uring_sqe, which is
186 * mmapped by the application using the IORING_OFF_SQES offset.
187 *
188 * This indirection could e.g. be used to assign fixed
189 * io_uring_sqe entries to operations and only submit them to
190 * the queue when needed.
191 *
192 * The kernel modifies neither the indices array nor the entries
193 * array.
194 */
195 u32 *sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700196 unsigned cached_sq_head;
197 unsigned sq_entries;
198 unsigned sq_mask;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700199 unsigned sq_thread_idle;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700200 struct io_uring_sqe *sq_sqes;
Jens Axboede0617e2019-04-06 21:51:27 -0600201
202 struct list_head defer_list;
Jens Axboe5262f562019-09-17 12:26:57 -0600203 struct list_head timeout_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700204 } ____cacheline_aligned_in_smp;
205
206 /* IO offload */
Jens Axboe54a91f32019-09-10 09:15:04 -0600207 struct workqueue_struct *sqo_wq[2];
Jens Axboe6c271ce2019-01-10 11:22:30 -0700208 struct task_struct *sqo_thread; /* if using sq thread polling */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700209 struct mm_struct *sqo_mm;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700210 wait_queue_head_t sqo_wait;
Jackie Liua4c0b3d2019-07-08 13:41:12 +0800211 struct completion sqo_thread_started;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700212
213 struct {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700214 unsigned cached_cq_tail;
215 unsigned cq_entries;
216 unsigned cq_mask;
217 struct wait_queue_head cq_wait;
218 struct fasync_struct *cq_fasync;
Jens Axboe9b402842019-04-11 11:45:41 -0600219 struct eventfd_ctx *cq_ev_fd;
Jens Axboe5262f562019-09-17 12:26:57 -0600220 atomic_t cq_timeouts;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700221 } ____cacheline_aligned_in_smp;
222
Hristo Venev75b28af2019-08-26 17:23:46 +0000223 struct io_rings *rings;
224
Jens Axboe6b063142019-01-10 22:13:58 -0700225 /*
226 * If used, fixed file set. Writers must ensure that ->refs is dead,
227 * readers must ensure that ->refs is alive as long as the file* is
228 * used. Only updated through io_uring_register(2).
229 */
230 struct file **user_files;
231 unsigned nr_user_files;
232
Jens Axboeedafcce2019-01-09 09:16:05 -0700233 /* if used, fixed mapped user buffers */
234 unsigned nr_user_bufs;
235 struct io_mapped_ubuf *user_bufs;
236
Jens Axboe2b188cc2019-01-07 10:46:33 -0700237 struct user_struct *user;
238
239 struct completion ctx_done;
240
241 struct {
242 struct mutex uring_lock;
243 wait_queue_head_t wait;
244 } ____cacheline_aligned_in_smp;
245
246 struct {
247 spinlock_t completion_lock;
Jens Axboedef596e2019-01-09 08:59:42 -0700248 bool poll_multi_file;
249 /*
250 * ->poll_list is protected by the ctx->uring_lock for
251 * io_uring instances that don't use IORING_SETUP_SQPOLL.
252 * For SQPOLL, only the single threaded io_sq_thread() will
253 * manipulate the list, hence no extra locking is needed there.
254 */
255 struct list_head poll_list;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700256 struct list_head cancel_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700257 } ____cacheline_aligned_in_smp;
258
Jens Axboe31b51512019-01-18 22:56:34 -0700259 struct async_list pending_async[2];
260
Jens Axboe2b188cc2019-01-07 10:46:33 -0700261#if defined(CONFIG_UNIX)
262 struct socket *ring_sock;
263#endif
264};
265
266struct sqe_submit {
267 const struct io_uring_sqe *sqe;
268 unsigned short index;
Jackie Liu8776f3f2019-09-09 20:50:39 +0800269 u32 sequence;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700270 bool has_user;
Jens Axboedef596e2019-01-09 08:59:42 -0700271 bool needs_lock;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700272 bool needs_fixed_file;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700273};
274
Jens Axboe09bb8392019-03-13 12:39:28 -0600275/*
276 * First field must be the file pointer in all the
277 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
278 */
Jens Axboe221c5eb2019-01-17 09:41:58 -0700279struct io_poll_iocb {
280 struct file *file;
281 struct wait_queue_head *head;
282 __poll_t events;
Jens Axboe8c838782019-03-12 15:48:16 -0600283 bool done;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700284 bool canceled;
285 struct wait_queue_entry wait;
286};
287
Jens Axboe5262f562019-09-17 12:26:57 -0600288struct io_timeout {
289 struct file *file;
290 struct hrtimer timer;
291};
292
Jens Axboe09bb8392019-03-13 12:39:28 -0600293/*
294 * NOTE! Each of the iocb union members has the file pointer
295 * as the first entry in their struct definition. So you can
296 * access the file pointer through any of the sub-structs,
297 * or directly as just 'ki_filp' in this struct.
298 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700299struct io_kiocb {
Jens Axboe221c5eb2019-01-17 09:41:58 -0700300 union {
Jens Axboe09bb8392019-03-13 12:39:28 -0600301 struct file *file;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700302 struct kiocb rw;
303 struct io_poll_iocb poll;
Jens Axboe5262f562019-09-17 12:26:57 -0600304 struct io_timeout timeout;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700305 };
Jens Axboe2b188cc2019-01-07 10:46:33 -0700306
307 struct sqe_submit submit;
308
309 struct io_ring_ctx *ctx;
310 struct list_head list;
Jens Axboe9e645e112019-05-10 16:07:28 -0600311 struct list_head link_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700312 unsigned int flags;
Jens Axboec16361c2019-01-17 08:39:48 -0700313 refcount_t refs;
Stefan Bühler8449eed2019-04-27 20:34:19 +0200314#define REQ_F_NOWAIT 1 /* must not punt to workers */
Jens Axboedef596e2019-01-09 08:59:42 -0700315#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
Jens Axboe6b063142019-01-10 22:13:58 -0700316#define REQ_F_FIXED_FILE 4 /* ctx owns file */
Jens Axboe31b51512019-01-18 22:56:34 -0700317#define REQ_F_SEQ_PREV 8 /* sequential with previous */
Stefan Bühlere2033e32019-05-11 19:08:01 +0200318#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
319#define REQ_F_IO_DRAINED 32 /* drain done */
Jens Axboe9e645e112019-05-10 16:07:28 -0600320#define REQ_F_LINK 64 /* linked sqes */
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +0800321#define REQ_F_LINK_DONE 128 /* linked sqes done */
322#define REQ_F_FAIL_LINK 256 /* fail rest of links */
Jackie Liu4fe2c962019-09-09 20:50:40 +0800323#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
Jens Axboe5262f562019-09-17 12:26:57 -0600324#define REQ_F_TIMEOUT 1024 /* timeout request */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700325 u64 user_data;
Jens Axboe9e645e112019-05-10 16:07:28 -0600326 u32 result;
Jens Axboede0617e2019-04-06 21:51:27 -0600327 u32 sequence;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700328
329 struct work_struct work;
330};
331
332#define IO_PLUG_THRESHOLD 2
Jens Axboedef596e2019-01-09 08:59:42 -0700333#define IO_IOPOLL_BATCH 8
Jens Axboe2b188cc2019-01-07 10:46:33 -0700334
Jens Axboe9a56a232019-01-09 09:06:50 -0700335struct io_submit_state {
336 struct blk_plug plug;
337
338 /*
Jens Axboe2579f912019-01-09 09:10:43 -0700339 * io_kiocb alloc cache
340 */
341 void *reqs[IO_IOPOLL_BATCH];
342 unsigned int free_reqs;
343 unsigned int cur_req;
344
345 /*
Jens Axboe9a56a232019-01-09 09:06:50 -0700346 * File reference cache
347 */
348 struct file *file;
349 unsigned int fd;
350 unsigned int has_refs;
351 unsigned int used_refs;
352 unsigned int ios_left;
353};
354
Jens Axboede0617e2019-04-06 21:51:27 -0600355static void io_sq_wq_submit_work(struct work_struct *work);
Jens Axboe5262f562019-09-17 12:26:57 -0600356static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
357 long res);
Jackie Liu4fe2c962019-09-09 20:50:40 +0800358static void __io_free_req(struct io_kiocb *req);
Jens Axboede0617e2019-04-06 21:51:27 -0600359
Jens Axboe2b188cc2019-01-07 10:46:33 -0700360static struct kmem_cache *req_cachep;
361
362static const struct file_operations io_uring_fops;
363
364struct sock *io_uring_get_socket(struct file *file)
365{
366#if defined(CONFIG_UNIX)
367 if (file->f_op == &io_uring_fops) {
368 struct io_ring_ctx *ctx = file->private_data;
369
370 return ctx->ring_sock->sk;
371 }
372#endif
373 return NULL;
374}
375EXPORT_SYMBOL(io_uring_get_socket);
376
377static void io_ring_ctx_ref_free(struct percpu_ref *ref)
378{
379 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
380
381 complete(&ctx->ctx_done);
382}
383
384static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
385{
386 struct io_ring_ctx *ctx;
Jens Axboe31b51512019-01-18 22:56:34 -0700387 int i;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700388
389 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
390 if (!ctx)
391 return NULL;
392
Roman Gushchin21482892019-05-07 10:01:48 -0700393 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
394 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700395 kfree(ctx);
396 return NULL;
397 }
398
399 ctx->flags = p->flags;
400 init_waitqueue_head(&ctx->cq_wait);
401 init_completion(&ctx->ctx_done);
Jackie Liua4c0b3d2019-07-08 13:41:12 +0800402 init_completion(&ctx->sqo_thread_started);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700403 mutex_init(&ctx->uring_lock);
404 init_waitqueue_head(&ctx->wait);
Jens Axboe31b51512019-01-18 22:56:34 -0700405 for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
406 spin_lock_init(&ctx->pending_async[i].lock);
407 INIT_LIST_HEAD(&ctx->pending_async[i].list);
408 atomic_set(&ctx->pending_async[i].cnt, 0);
409 }
Jens Axboe2b188cc2019-01-07 10:46:33 -0700410 spin_lock_init(&ctx->completion_lock);
Jens Axboedef596e2019-01-09 08:59:42 -0700411 INIT_LIST_HEAD(&ctx->poll_list);
Jens Axboe221c5eb2019-01-17 09:41:58 -0700412 INIT_LIST_HEAD(&ctx->cancel_list);
Jens Axboede0617e2019-04-06 21:51:27 -0600413 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -0600414 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700415 return ctx;
416}
417
Jens Axboede0617e2019-04-06 21:51:27 -0600418static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
419 struct io_kiocb *req)
420{
Jens Axboe5262f562019-09-17 12:26:57 -0600421 /* timeout requests always honor sequence */
422 if (!(req->flags & REQ_F_TIMEOUT) &&
423 (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
Jens Axboede0617e2019-04-06 21:51:27 -0600424 return false;
425
Hristo Venev75b28af2019-08-26 17:23:46 +0000426 return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
Jens Axboede0617e2019-04-06 21:51:27 -0600427}
428
Jens Axboe5262f562019-09-17 12:26:57 -0600429static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
430 struct list_head *list)
Jens Axboede0617e2019-04-06 21:51:27 -0600431{
432 struct io_kiocb *req;
433
Jens Axboe5262f562019-09-17 12:26:57 -0600434 if (list_empty(list))
Jens Axboede0617e2019-04-06 21:51:27 -0600435 return NULL;
436
Jens Axboe5262f562019-09-17 12:26:57 -0600437 req = list_first_entry(list, struct io_kiocb, list);
Jens Axboede0617e2019-04-06 21:51:27 -0600438 if (!io_sequence_defer(ctx, req)) {
439 list_del_init(&req->list);
440 return req;
441 }
442
443 return NULL;
444}
445
Jens Axboe5262f562019-09-17 12:26:57 -0600446static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
447{
448 return __io_get_deferred_req(ctx, &ctx->defer_list);
449}
450
451static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
452{
453 return __io_get_deferred_req(ctx, &ctx->timeout_list);
454}
455
Jens Axboede0617e2019-04-06 21:51:27 -0600456static void __io_commit_cqring(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700457{
Hristo Venev75b28af2019-08-26 17:23:46 +0000458 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700459
Hristo Venev75b28af2019-08-26 17:23:46 +0000460 if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700461 /* order cqe stores with ring update */
Hristo Venev75b28af2019-08-26 17:23:46 +0000462 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700463
Jens Axboe2b188cc2019-01-07 10:46:33 -0700464 if (wq_has_sleeper(&ctx->cq_wait)) {
465 wake_up_interruptible(&ctx->cq_wait);
466 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
467 }
468 }
469}
470
Jens Axboe18d9be12019-09-10 09:13:05 -0600471static inline void io_queue_async_work(struct io_ring_ctx *ctx,
472 struct io_kiocb *req)
473{
Jens Axboe6cc47d12019-09-18 11:18:23 -0600474 int rw = 0;
Jens Axboe54a91f32019-09-10 09:15:04 -0600475
Jens Axboe6cc47d12019-09-18 11:18:23 -0600476 if (req->submit.sqe) {
477 switch (req->submit.sqe->opcode) {
478 case IORING_OP_WRITEV:
479 case IORING_OP_WRITE_FIXED:
480 rw = !(req->rw.ki_flags & IOCB_DIRECT);
481 break;
482 }
Jens Axboe54a91f32019-09-10 09:15:04 -0600483 }
484
485 queue_work(ctx->sqo_wq[rw], &req->work);
Jens Axboe18d9be12019-09-10 09:13:05 -0600486}
487
Jens Axboe5262f562019-09-17 12:26:57 -0600488static void io_kill_timeout(struct io_kiocb *req)
489{
490 int ret;
491
492 ret = hrtimer_try_to_cancel(&req->timeout.timer);
493 if (ret != -1) {
494 atomic_inc(&req->ctx->cq_timeouts);
495 list_del(&req->list);
496 io_cqring_fill_event(req->ctx, req->user_data, 0);
497 __io_free_req(req);
498 }
499}
500
501static void io_kill_timeouts(struct io_ring_ctx *ctx)
502{
503 struct io_kiocb *req, *tmp;
504
505 spin_lock_irq(&ctx->completion_lock);
506 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
507 io_kill_timeout(req);
508 spin_unlock_irq(&ctx->completion_lock);
509}
510
Jens Axboede0617e2019-04-06 21:51:27 -0600511static void io_commit_cqring(struct io_ring_ctx *ctx)
512{
513 struct io_kiocb *req;
514
Jens Axboe5262f562019-09-17 12:26:57 -0600515 while ((req = io_get_timeout_req(ctx)) != NULL)
516 io_kill_timeout(req);
517
Jens Axboede0617e2019-04-06 21:51:27 -0600518 __io_commit_cqring(ctx);
519
520 while ((req = io_get_deferred_req(ctx)) != NULL) {
Jackie Liu4fe2c962019-09-09 20:50:40 +0800521 if (req->flags & REQ_F_SHADOW_DRAIN) {
522 /* Just for drain, free it. */
523 __io_free_req(req);
524 continue;
525 }
Jens Axboede0617e2019-04-06 21:51:27 -0600526 req->flags |= REQ_F_IO_DRAINED;
Jens Axboe18d9be12019-09-10 09:13:05 -0600527 io_queue_async_work(ctx, req);
Jens Axboede0617e2019-04-06 21:51:27 -0600528 }
529}
530
Jens Axboe2b188cc2019-01-07 10:46:33 -0700531static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
532{
Hristo Venev75b28af2019-08-26 17:23:46 +0000533 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700534 unsigned tail;
535
536 tail = ctx->cached_cq_tail;
Stefan Bühler115e12e2019-04-24 23:54:18 +0200537 /*
538 * writes to the cq entry need to come after reading head; the
539 * control dependency is enough as we're using WRITE_ONCE to
540 * fill the cq entry
541 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000542 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700543 return NULL;
544
545 ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +0000546 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -0700547}
548
549static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
Jens Axboec71ffb62019-05-13 20:58:29 -0600550 long res)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700551{
552 struct io_uring_cqe *cqe;
553
554 /*
555 * If we can't get a cq entry, userspace overflowed the
556 * submission (by quite a lot). Increment the overflow count in
557 * the ring.
558 */
559 cqe = io_get_cqring(ctx);
560 if (cqe) {
561 WRITE_ONCE(cqe->user_data, ki_user_data);
562 WRITE_ONCE(cqe->res, res);
Jens Axboec71ffb62019-05-13 20:58:29 -0600563 WRITE_ONCE(cqe->flags, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700564 } else {
Hristo Venev75b28af2019-08-26 17:23:46 +0000565 unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700566
Hristo Venev75b28af2019-08-26 17:23:46 +0000567 WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700568 }
569}
570
Jens Axboe8c838782019-03-12 15:48:16 -0600571static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
572{
573 if (waitqueue_active(&ctx->wait))
574 wake_up(&ctx->wait);
575 if (waitqueue_active(&ctx->sqo_wait))
576 wake_up(&ctx->sqo_wait);
Jens Axboe9b402842019-04-11 11:45:41 -0600577 if (ctx->cq_ev_fd)
578 eventfd_signal(ctx->cq_ev_fd, 1);
Jens Axboe8c838782019-03-12 15:48:16 -0600579}
580
581static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
Jens Axboec71ffb62019-05-13 20:58:29 -0600582 long res)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700583{
584 unsigned long flags;
585
586 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboec71ffb62019-05-13 20:58:29 -0600587 io_cqring_fill_event(ctx, user_data, res);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700588 io_commit_cqring(ctx);
589 spin_unlock_irqrestore(&ctx->completion_lock, flags);
590
Jens Axboe8c838782019-03-12 15:48:16 -0600591 io_cqring_ev_posted(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700592}
593
594static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
595{
596 percpu_ref_put_many(&ctx->refs, refs);
597
598 if (waitqueue_active(&ctx->wait))
599 wake_up(&ctx->wait);
600}
601
Jens Axboe2579f912019-01-09 09:10:43 -0700602static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
603 struct io_submit_state *state)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700604{
Jens Axboefd6fab22019-03-14 16:30:06 -0600605 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700606 struct io_kiocb *req;
607
608 if (!percpu_ref_tryget(&ctx->refs))
609 return NULL;
610
Jens Axboe2579f912019-01-09 09:10:43 -0700611 if (!state) {
Jens Axboefd6fab22019-03-14 16:30:06 -0600612 req = kmem_cache_alloc(req_cachep, gfp);
Jens Axboe2579f912019-01-09 09:10:43 -0700613 if (unlikely(!req))
614 goto out;
615 } else if (!state->free_reqs) {
616 size_t sz;
617 int ret;
618
619 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
Jens Axboefd6fab22019-03-14 16:30:06 -0600620 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
621
622 /*
623 * Bulk alloc is all-or-nothing. If we fail to get a batch,
624 * retry single alloc to be on the safe side.
625 */
626 if (unlikely(ret <= 0)) {
627 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
628 if (!state->reqs[0])
629 goto out;
630 ret = 1;
631 }
Jens Axboe2579f912019-01-09 09:10:43 -0700632 state->free_reqs = ret - 1;
633 state->cur_req = 1;
634 req = state->reqs[0];
635 } else {
636 req = state->reqs[state->cur_req];
637 state->free_reqs--;
638 state->cur_req++;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700639 }
640
Jens Axboe60c112b2019-06-21 10:20:18 -0600641 req->file = NULL;
Jens Axboe2579f912019-01-09 09:10:43 -0700642 req->ctx = ctx;
643 req->flags = 0;
Jens Axboee65ef562019-03-12 10:16:44 -0600644 /* one is dropped after submission, the other at completion */
645 refcount_set(&req->refs, 2);
Jens Axboe9e645e112019-05-10 16:07:28 -0600646 req->result = 0;
Jens Axboe2579f912019-01-09 09:10:43 -0700647 return req;
648out:
Jens Axboe2b188cc2019-01-07 10:46:33 -0700649 io_ring_drop_ctx_refs(ctx, 1);
650 return NULL;
651}
652
Jens Axboedef596e2019-01-09 08:59:42 -0700653static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
654{
655 if (*nr) {
656 kmem_cache_free_bulk(req_cachep, *nr, reqs);
657 io_ring_drop_ctx_refs(ctx, *nr);
658 *nr = 0;
659 }
660}
661
Jens Axboe9e645e112019-05-10 16:07:28 -0600662static void __io_free_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700663{
Jens Axboe09bb8392019-03-13 12:39:28 -0600664 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
665 fput(req->file);
Jens Axboee65ef562019-03-12 10:16:44 -0600666 io_ring_drop_ctx_refs(req->ctx, 1);
667 kmem_cache_free(req_cachep, req);
668}
669
Jens Axboe9e645e112019-05-10 16:07:28 -0600670static void io_req_link_next(struct io_kiocb *req)
671{
672 struct io_kiocb *nxt;
673
674 /*
675 * The list should never be empty when we are called here. But could
676 * potentially happen if the chain is messed up, check to be on the
677 * safe side.
678 */
679 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
680 if (nxt) {
681 list_del(&nxt->list);
682 if (!list_empty(&req->link_list)) {
683 INIT_LIST_HEAD(&nxt->link_list);
684 list_splice(&req->link_list, &nxt->link_list);
685 nxt->flags |= REQ_F_LINK;
686 }
687
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +0800688 nxt->flags |= REQ_F_LINK_DONE;
Jens Axboe9e645e112019-05-10 16:07:28 -0600689 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
Jens Axboe18d9be12019-09-10 09:13:05 -0600690 io_queue_async_work(req->ctx, nxt);
Jens Axboe9e645e112019-05-10 16:07:28 -0600691 }
692}
693
694/*
695 * Called if REQ_F_LINK is set, and we fail the head request
696 */
697static void io_fail_links(struct io_kiocb *req)
698{
699 struct io_kiocb *link;
700
701 while (!list_empty(&req->link_list)) {
702 link = list_first_entry(&req->link_list, struct io_kiocb, list);
703 list_del(&link->list);
704
705 io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
706 __io_free_req(link);
707 }
708}
709
710static void io_free_req(struct io_kiocb *req)
711{
712 /*
713 * If LINK is set, we have dependent requests in this chain. If we
714 * didn't fail this request, queue the first one up, moving any other
715 * dependencies to the next request. In case of failure, fail the rest
716 * of the chain.
717 */
718 if (req->flags & REQ_F_LINK) {
719 if (req->flags & REQ_F_FAIL_LINK)
720 io_fail_links(req);
721 else
722 io_req_link_next(req);
723 }
724
725 __io_free_req(req);
726}
727
Jens Axboee65ef562019-03-12 10:16:44 -0600728static void io_put_req(struct io_kiocb *req)
729{
730 if (refcount_dec_and_test(&req->refs))
731 io_free_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700732}
733
Hristo Venev75b28af2019-08-26 17:23:46 +0000734static unsigned io_cqring_events(struct io_rings *rings)
Jens Axboea3a0e432019-08-20 11:03:11 -0600735{
736 /* See comment at the top of this file */
737 smp_rmb();
Hristo Venev75b28af2019-08-26 17:23:46 +0000738 return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
Jens Axboea3a0e432019-08-20 11:03:11 -0600739}
740
Jens Axboedef596e2019-01-09 08:59:42 -0700741/*
742 * Find and free completed poll iocbs
743 */
744static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
745 struct list_head *done)
746{
747 void *reqs[IO_IOPOLL_BATCH];
748 struct io_kiocb *req;
Jens Axboe09bb8392019-03-13 12:39:28 -0600749 int to_free;
Jens Axboedef596e2019-01-09 08:59:42 -0700750
Jens Axboe09bb8392019-03-13 12:39:28 -0600751 to_free = 0;
Jens Axboedef596e2019-01-09 08:59:42 -0700752 while (!list_empty(done)) {
753 req = list_first_entry(done, struct io_kiocb, list);
754 list_del(&req->list);
755
Jens Axboe9e645e112019-05-10 16:07:28 -0600756 io_cqring_fill_event(ctx, req->user_data, req->result);
Jens Axboedef596e2019-01-09 08:59:42 -0700757 (*nr_events)++;
758
Jens Axboe09bb8392019-03-13 12:39:28 -0600759 if (refcount_dec_and_test(&req->refs)) {
760 /* If we're not using fixed files, we have to pair the
761 * completion part with the file put. Use regular
762 * completions for those, only batch free for fixed
Jens Axboe9e645e112019-05-10 16:07:28 -0600763 * file and non-linked commands.
Jens Axboe09bb8392019-03-13 12:39:28 -0600764 */
Jens Axboe9e645e112019-05-10 16:07:28 -0600765 if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
766 REQ_F_FIXED_FILE) {
Jens Axboe09bb8392019-03-13 12:39:28 -0600767 reqs[to_free++] = req;
768 if (to_free == ARRAY_SIZE(reqs))
769 io_free_req_many(ctx, reqs, &to_free);
Jens Axboe6b063142019-01-10 22:13:58 -0700770 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -0600771 io_free_req(req);
Jens Axboe6b063142019-01-10 22:13:58 -0700772 }
Jens Axboe9a56a232019-01-09 09:06:50 -0700773 }
Jens Axboedef596e2019-01-09 08:59:42 -0700774 }
Jens Axboedef596e2019-01-09 08:59:42 -0700775
Jens Axboe09bb8392019-03-13 12:39:28 -0600776 io_commit_cqring(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -0700777 io_free_req_many(ctx, reqs, &to_free);
778}
779
780static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
781 long min)
782{
783 struct io_kiocb *req, *tmp;
784 LIST_HEAD(done);
785 bool spin;
786 int ret;
787
788 /*
789 * Only spin for completions if we don't have multiple devices hanging
790 * off our complete list, and we're under the requested amount.
791 */
792 spin = !ctx->poll_multi_file && *nr_events < min;
793
794 ret = 0;
795 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
796 struct kiocb *kiocb = &req->rw;
797
798 /*
799 * Move completed entries to our local list. If we find a
800 * request that requires polling, break out and complete
801 * the done list first, if we have entries there.
802 */
803 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
804 list_move_tail(&req->list, &done);
805 continue;
806 }
807 if (!list_empty(&done))
808 break;
809
810 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
811 if (ret < 0)
812 break;
813
814 if (ret && spin)
815 spin = false;
816 ret = 0;
817 }
818
819 if (!list_empty(&done))
820 io_iopoll_complete(ctx, nr_events, &done);
821
822 return ret;
823}
824
825/*
826 * Poll for a mininum of 'min' events. Note that if min == 0 we consider that a
827 * non-spinning poll check - we'll still enter the driver poll loop, but only
828 * as a non-spinning completion check.
829 */
830static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
831 long min)
832{
Jens Axboe08f54392019-08-21 22:19:11 -0600833 while (!list_empty(&ctx->poll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -0700834 int ret;
835
836 ret = io_do_iopoll(ctx, nr_events, min);
837 if (ret < 0)
838 return ret;
839 if (!min || *nr_events >= min)
840 return 0;
841 }
842
843 return 1;
844}
845
846/*
847 * We can't just wait for polled events to come to us, we have to actively
848 * find and complete them.
849 */
850static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
851{
852 if (!(ctx->flags & IORING_SETUP_IOPOLL))
853 return;
854
855 mutex_lock(&ctx->uring_lock);
856 while (!list_empty(&ctx->poll_list)) {
857 unsigned int nr_events = 0;
858
859 io_iopoll_getevents(ctx, &nr_events, 1);
Jens Axboe08f54392019-08-21 22:19:11 -0600860
861 /*
862 * Ensure we allow local-to-the-cpu processing to take place,
863 * in this case we need to ensure that we reap all events.
864 */
865 cond_resched();
Jens Axboedef596e2019-01-09 08:59:42 -0700866 }
867 mutex_unlock(&ctx->uring_lock);
868}
869
870static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
871 long min)
872{
Jens Axboe500f9fb2019-08-19 12:15:59 -0600873 int iters, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -0700874
Jens Axboe500f9fb2019-08-19 12:15:59 -0600875 /*
876 * We disallow the app entering submit/complete with polling, but we
877 * still need to lock the ring to prevent racing with polled issue
878 * that got punted to a workqueue.
879 */
880 mutex_lock(&ctx->uring_lock);
881
882 iters = 0;
Jens Axboedef596e2019-01-09 08:59:42 -0700883 do {
884 int tmin = 0;
885
Jens Axboe500f9fb2019-08-19 12:15:59 -0600886 /*
Jens Axboea3a0e432019-08-20 11:03:11 -0600887 * Don't enter poll loop if we already have events pending.
888 * If we do, we can potentially be spinning for commands that
889 * already triggered a CQE (eg in error).
890 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000891 if (io_cqring_events(ctx->rings))
Jens Axboea3a0e432019-08-20 11:03:11 -0600892 break;
893
894 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -0600895 * If a submit got punted to a workqueue, we can have the
896 * application entering polling for a command before it gets
897 * issued. That app will hold the uring_lock for the duration
898 * of the poll right here, so we need to take a breather every
899 * now and then to ensure that the issue has a chance to add
900 * the poll to the issued list. Otherwise we can spin here
901 * forever, while the workqueue is stuck trying to acquire the
902 * very same mutex.
903 */
904 if (!(++iters & 7)) {
905 mutex_unlock(&ctx->uring_lock);
906 mutex_lock(&ctx->uring_lock);
907 }
908
Jens Axboedef596e2019-01-09 08:59:42 -0700909 if (*nr_events < min)
910 tmin = min - *nr_events;
911
912 ret = io_iopoll_getevents(ctx, nr_events, tmin);
913 if (ret <= 0)
914 break;
915 ret = 0;
916 } while (min && !*nr_events && !need_resched());
917
Jens Axboe500f9fb2019-08-19 12:15:59 -0600918 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -0700919 return ret;
920}
921
Jens Axboe2b188cc2019-01-07 10:46:33 -0700922static void kiocb_end_write(struct kiocb *kiocb)
923{
924 if (kiocb->ki_flags & IOCB_WRITE) {
925 struct inode *inode = file_inode(kiocb->ki_filp);
926
927 /*
928 * Tell lockdep we inherited freeze protection from submission
929 * thread.
930 */
931 if (S_ISREG(inode->i_mode))
932 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
933 file_end_write(kiocb->ki_filp);
934 }
935}
936
937static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
938{
939 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
940
941 kiocb_end_write(kiocb);
942
Jens Axboe9e645e112019-05-10 16:07:28 -0600943 if ((req->flags & REQ_F_LINK) && res != req->result)
944 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -0600945 io_cqring_add_event(req->ctx, req->user_data, res);
Jens Axboee65ef562019-03-12 10:16:44 -0600946 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700947}
948
Jens Axboedef596e2019-01-09 08:59:42 -0700949static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
950{
951 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
952
953 kiocb_end_write(kiocb);
954
Jens Axboe9e645e112019-05-10 16:07:28 -0600955 if ((req->flags & REQ_F_LINK) && res != req->result)
956 req->flags |= REQ_F_FAIL_LINK;
957 req->result = res;
Jens Axboedef596e2019-01-09 08:59:42 -0700958 if (res != -EAGAIN)
959 req->flags |= REQ_F_IOPOLL_COMPLETED;
960}
961
962/*
963 * After the iocb has been issued, it's safe to be found on the poll list.
964 * Adding the kiocb to the list AFTER submission ensures that we don't
965 * find it from a io_iopoll_getevents() thread before the issuer is done
966 * accessing the kiocb cookie.
967 */
968static void io_iopoll_req_issued(struct io_kiocb *req)
969{
970 struct io_ring_ctx *ctx = req->ctx;
971
972 /*
973 * Track whether we have multiple files in our lists. This will impact
974 * how we do polling eventually, not spinning if we're on potentially
975 * different devices.
976 */
977 if (list_empty(&ctx->poll_list)) {
978 ctx->poll_multi_file = false;
979 } else if (!ctx->poll_multi_file) {
980 struct io_kiocb *list_req;
981
982 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
983 list);
984 if (list_req->rw.ki_filp != req->rw.ki_filp)
985 ctx->poll_multi_file = true;
986 }
987
988 /*
989 * For fast devices, IO may have already completed. If it has, add
990 * it to the front so we find it first.
991 */
992 if (req->flags & REQ_F_IOPOLL_COMPLETED)
993 list_add(&req->list, &ctx->poll_list);
994 else
995 list_add_tail(&req->list, &ctx->poll_list);
996}
997
Jens Axboe3d6770f2019-04-13 11:50:54 -0600998static void io_file_put(struct io_submit_state *state)
Jens Axboe9a56a232019-01-09 09:06:50 -0700999{
Jens Axboe3d6770f2019-04-13 11:50:54 -06001000 if (state->file) {
Jens Axboe9a56a232019-01-09 09:06:50 -07001001 int diff = state->has_refs - state->used_refs;
1002
1003 if (diff)
1004 fput_many(state->file, diff);
1005 state->file = NULL;
1006 }
1007}
1008
1009/*
1010 * Get as many references to a file as we have IOs left in this submission,
1011 * assuming most submissions are for one file, or at least that each file
1012 * has more than one submission.
1013 */
1014static struct file *io_file_get(struct io_submit_state *state, int fd)
1015{
1016 if (!state)
1017 return fget(fd);
1018
1019 if (state->file) {
1020 if (state->fd == fd) {
1021 state->used_refs++;
1022 state->ios_left--;
1023 return state->file;
1024 }
Jens Axboe3d6770f2019-04-13 11:50:54 -06001025 io_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07001026 }
1027 state->file = fget_many(fd, state->ios_left);
1028 if (!state->file)
1029 return NULL;
1030
1031 state->fd = fd;
1032 state->has_refs = state->ios_left;
1033 state->used_refs = 1;
1034 state->ios_left--;
1035 return state->file;
1036}
1037
Jens Axboe2b188cc2019-01-07 10:46:33 -07001038/*
1039 * If we tracked the file through the SCM inflight mechanism, we could support
1040 * any file. For now, just ensure that anything potentially problematic is done
1041 * inline.
1042 */
1043static bool io_file_supports_async(struct file *file)
1044{
1045 umode_t mode = file_inode(file)->i_mode;
1046
1047 if (S_ISBLK(mode) || S_ISCHR(mode))
1048 return true;
1049 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1050 return true;
1051
1052 return false;
1053}
1054
Jens Axboe6c271ce2019-01-10 11:22:30 -07001055static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001056 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001057{
Jens Axboe6c271ce2019-01-10 11:22:30 -07001058 const struct io_uring_sqe *sqe = s->sqe;
Jens Axboedef596e2019-01-09 08:59:42 -07001059 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001060 struct kiocb *kiocb = &req->rw;
Jens Axboe09bb8392019-03-13 12:39:28 -06001061 unsigned ioprio;
1062 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001063
Jens Axboe09bb8392019-03-13 12:39:28 -06001064 if (!req->file)
1065 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001066
Jens Axboe09bb8392019-03-13 12:39:28 -06001067 if (force_nonblock && !io_file_supports_async(req->file))
1068 force_nonblock = false;
Jens Axboe6b063142019-01-10 22:13:58 -07001069
Jens Axboe2b188cc2019-01-07 10:46:33 -07001070 kiocb->ki_pos = READ_ONCE(sqe->off);
1071 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
1072 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
1073
1074 ioprio = READ_ONCE(sqe->ioprio);
1075 if (ioprio) {
1076 ret = ioprio_check_cap(ioprio);
1077 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06001078 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001079
1080 kiocb->ki_ioprio = ioprio;
1081 } else
1082 kiocb->ki_ioprio = get_current_ioprio();
1083
1084 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
1085 if (unlikely(ret))
Jens Axboe09bb8392019-03-13 12:39:28 -06001086 return ret;
Stefan Bühler8449eed2019-04-27 20:34:19 +02001087
1088 /* don't allow async punt if RWF_NOWAIT was requested */
1089 if (kiocb->ki_flags & IOCB_NOWAIT)
1090 req->flags |= REQ_F_NOWAIT;
1091
1092 if (force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001093 kiocb->ki_flags |= IOCB_NOWAIT;
Stefan Bühler8449eed2019-04-27 20:34:19 +02001094
Jens Axboedef596e2019-01-09 08:59:42 -07001095 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07001096 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1097 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06001098 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001099
Jens Axboedef596e2019-01-09 08:59:42 -07001100 kiocb->ki_flags |= IOCB_HIPRI;
1101 kiocb->ki_complete = io_complete_rw_iopoll;
1102 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06001103 if (kiocb->ki_flags & IOCB_HIPRI)
1104 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07001105 kiocb->ki_complete = io_complete_rw;
1106 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001107 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001108}
1109
1110static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1111{
1112 switch (ret) {
1113 case -EIOCBQUEUED:
1114 break;
1115 case -ERESTARTSYS:
1116 case -ERESTARTNOINTR:
1117 case -ERESTARTNOHAND:
1118 case -ERESTART_RESTARTBLOCK:
1119 /*
1120 * We can't just restart the syscall, since previously
1121 * submitted sqes may already be in progress. Just fail this
1122 * IO with EINTR.
1123 */
1124 ret = -EINTR;
1125 /* fall through */
1126 default:
1127 kiocb->ki_complete(kiocb, ret, 0);
1128 }
1129}
1130
Jens Axboeedafcce2019-01-09 09:16:05 -07001131static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1132 const struct io_uring_sqe *sqe,
1133 struct iov_iter *iter)
1134{
1135 size_t len = READ_ONCE(sqe->len);
1136 struct io_mapped_ubuf *imu;
1137 unsigned index, buf_index;
1138 size_t offset;
1139 u64 buf_addr;
1140
1141 /* attempt to use fixed buffers without having provided iovecs */
1142 if (unlikely(!ctx->user_bufs))
1143 return -EFAULT;
1144
1145 buf_index = READ_ONCE(sqe->buf_index);
1146 if (unlikely(buf_index >= ctx->nr_user_bufs))
1147 return -EFAULT;
1148
1149 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1150 imu = &ctx->user_bufs[index];
1151 buf_addr = READ_ONCE(sqe->addr);
1152
1153 /* overflow */
1154 if (buf_addr + len < buf_addr)
1155 return -EFAULT;
1156 /* not inside the mapped region */
1157 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1158 return -EFAULT;
1159
1160 /*
1161 * May not be a start of buffer, set size appropriately
1162 * and advance us to the beginning.
1163 */
1164 offset = buf_addr - imu->ubuf;
1165 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06001166
1167 if (offset) {
1168 /*
1169 * Don't use iov_iter_advance() here, as it's really slow for
1170 * using the latter parts of a big fixed buffer - it iterates
1171 * over each segment manually. We can cheat a bit here, because
1172 * we know that:
1173 *
1174 * 1) it's a BVEC iter, we set it up
1175 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1176 * first and last bvec
1177 *
1178 * So just find our index, and adjust the iterator afterwards.
1179 * If the offset is within the first bvec (or the whole first
1180 * bvec, just use iov_iter_advance(). This makes it easier
1181 * since we can just skip the first segment, which may not
1182 * be PAGE_SIZE aligned.
1183 */
1184 const struct bio_vec *bvec = imu->bvec;
1185
1186 if (offset <= bvec->bv_len) {
1187 iov_iter_advance(iter, offset);
1188 } else {
1189 unsigned long seg_skip;
1190
1191 /* skip first vec */
1192 offset -= bvec->bv_len;
1193 seg_skip = 1 + (offset >> PAGE_SHIFT);
1194
1195 iter->bvec = bvec + seg_skip;
1196 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02001197 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06001198 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06001199 }
1200 }
1201
Jens Axboeedafcce2019-01-09 09:16:05 -07001202 return 0;
1203}
1204
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001205static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1206 const struct sqe_submit *s, struct iovec **iovec,
1207 struct iov_iter *iter)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001208{
1209 const struct io_uring_sqe *sqe = s->sqe;
1210 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1211 size_t sqe_len = READ_ONCE(sqe->len);
Jens Axboeedafcce2019-01-09 09:16:05 -07001212 u8 opcode;
1213
1214 /*
1215 * We're reading ->opcode for the second time, but the first read
1216 * doesn't care whether it's _FIXED or not, so it doesn't matter
1217 * whether ->opcode changes concurrently. The first read does care
1218 * about whether it is a READ or a WRITE, so we don't trust this read
1219 * for that purpose and instead let the caller pass in the read/write
1220 * flag.
1221 */
1222 opcode = READ_ONCE(sqe->opcode);
1223 if (opcode == IORING_OP_READ_FIXED ||
1224 opcode == IORING_OP_WRITE_FIXED) {
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001225 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07001226 *iovec = NULL;
1227 return ret;
1228 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001229
1230 if (!s->has_user)
1231 return -EFAULT;
1232
1233#ifdef CONFIG_COMPAT
1234 if (ctx->compat)
1235 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1236 iovec, iter);
1237#endif
1238
1239 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1240}
1241
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001242static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
1243{
1244 if (al->file == kiocb->ki_filp) {
1245 off_t start, end;
1246
1247 /*
1248 * Allow merging if we're anywhere in the range of the same
1249 * page. Generally this happens for sub-page reads or writes,
1250 * and it's beneficial to allow the first worker to bring the
1251 * page in and the piggy backed work can then work on the
1252 * cached page.
1253 */
1254 start = al->io_start & PAGE_MASK;
1255 end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
1256 if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
1257 return true;
1258 }
1259
1260 al->file = NULL;
1261 return false;
1262}
1263
Jens Axboe31b51512019-01-18 22:56:34 -07001264/*
1265 * Make a note of the last file/offset/direction we punted to async
1266 * context. We'll use this information to see if we can piggy back a
1267 * sequential request onto the previous one, if it's still hasn't been
1268 * completed by the async worker.
1269 */
1270static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
1271{
1272 struct async_list *async_list = &req->ctx->pending_async[rw];
1273 struct kiocb *kiocb = &req->rw;
1274 struct file *filp = kiocb->ki_filp;
Jens Axboe31b51512019-01-18 22:56:34 -07001275
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001276 if (io_should_merge(async_list, kiocb)) {
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001277 unsigned long max_bytes;
Jens Axboe31b51512019-01-18 22:56:34 -07001278
1279 /* Use 8x RA size as a decent limiter for both reads/writes */
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001280 max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
1281 if (!max_bytes)
1282 max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
Jens Axboe31b51512019-01-18 22:56:34 -07001283
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001284 /* If max len are exceeded, reset the state */
1285 if (async_list->io_len + len <= max_bytes) {
Jens Axboe31b51512019-01-18 22:56:34 -07001286 req->flags |= REQ_F_SEQ_PREV;
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001287 async_list->io_len += len;
Jens Axboe31b51512019-01-18 22:56:34 -07001288 } else {
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001289 async_list->file = NULL;
Jens Axboe31b51512019-01-18 22:56:34 -07001290 }
1291 }
1292
1293 /* New file? Reset state. */
1294 if (async_list->file != filp) {
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001295 async_list->io_start = kiocb->ki_pos;
1296 async_list->io_len = len;
Jens Axboe31b51512019-01-18 22:56:34 -07001297 async_list->file = filp;
1298 }
Jens Axboe31b51512019-01-18 22:56:34 -07001299}
1300
Jens Axboee0c5c572019-03-12 10:18:47 -06001301static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001302 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001303{
1304 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1305 struct kiocb *kiocb = &req->rw;
1306 struct iov_iter iter;
1307 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001308 size_t iov_count;
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001309 ssize_t read_size, ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001310
Jens Axboe8358e3a2019-04-23 08:17:58 -06001311 ret = io_prep_rw(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001312 if (ret)
1313 return ret;
1314 file = kiocb->ki_filp;
1315
Jens Axboe2b188cc2019-01-07 10:46:33 -07001316 if (unlikely(!(file->f_mode & FMODE_READ)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001317 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001318 if (unlikely(!file->f_op->read_iter))
Jens Axboe09bb8392019-03-13 12:39:28 -06001319 return -EINVAL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001320
1321 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001322 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001323 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001324
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001325 read_size = ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06001326 if (req->flags & REQ_F_LINK)
1327 req->result = read_size;
1328
Jens Axboe31b51512019-01-18 22:56:34 -07001329 iov_count = iov_iter_count(&iter);
1330 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001331 if (!ret) {
1332 ssize_t ret2;
1333
Jens Axboe2b188cc2019-01-07 10:46:33 -07001334 ret2 = call_read_iter(file, kiocb, &iter);
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001335 /*
1336 * In case of a short read, punt to async. This can happen
1337 * if we have data partially cached. Alternatively we can
1338 * return the short read, in which case the application will
1339 * need to issue another SQE and wait for it. That SQE will
1340 * need async punt anyway, so it's more efficient to do it
1341 * here.
1342 */
1343 if (force_nonblock && ret2 > 0 && ret2 < read_size)
1344 ret2 = -EAGAIN;
1345 /* Catch -EAGAIN return for forced non-blocking submission */
Jens Axboe31b51512019-01-18 22:56:34 -07001346 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07001347 io_rw_done(kiocb, ret2);
Jens Axboe31b51512019-01-18 22:56:34 -07001348 } else {
1349 /*
1350 * If ->needs_lock is true, we're already in async
1351 * context.
1352 */
1353 if (!s->needs_lock)
1354 io_async_list_note(READ, req, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001355 ret = -EAGAIN;
Jens Axboe31b51512019-01-18 22:56:34 -07001356 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001357 }
1358 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001359 return ret;
1360}
1361
Jens Axboee0c5c572019-03-12 10:18:47 -06001362static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001363 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001364{
1365 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1366 struct kiocb *kiocb = &req->rw;
1367 struct iov_iter iter;
1368 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001369 size_t iov_count;
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001370 ssize_t ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001371
Jens Axboe8358e3a2019-04-23 08:17:58 -06001372 ret = io_prep_rw(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001373 if (ret)
1374 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001375
Jens Axboe2b188cc2019-01-07 10:46:33 -07001376 file = kiocb->ki_filp;
1377 if (unlikely(!(file->f_mode & FMODE_WRITE)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001378 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001379 if (unlikely(!file->f_op->write_iter))
Jens Axboe09bb8392019-03-13 12:39:28 -06001380 return -EINVAL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001381
1382 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001383 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001384 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001385
Jens Axboe9e645e112019-05-10 16:07:28 -06001386 if (req->flags & REQ_F_LINK)
1387 req->result = ret;
1388
Jens Axboe31b51512019-01-18 22:56:34 -07001389 iov_count = iov_iter_count(&iter);
1390
1391 ret = -EAGAIN;
1392 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
1393 /* If ->needs_lock is true, we're already in async context. */
1394 if (!s->needs_lock)
1395 io_async_list_note(WRITE, req, iov_count);
1396 goto out_free;
1397 }
1398
1399 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001400 if (!ret) {
Roman Penyaev9bf79332019-03-25 20:09:24 +01001401 ssize_t ret2;
1402
Jens Axboe2b188cc2019-01-07 10:46:33 -07001403 /*
1404 * Open-code file_start_write here to grab freeze protection,
1405 * which will be released by another thread in
1406 * io_complete_rw(). Fool lockdep by telling it the lock got
1407 * released so that it doesn't complain about the held lock when
1408 * we return to userspace.
1409 */
1410 if (S_ISREG(file_inode(file)->i_mode)) {
1411 __sb_start_write(file_inode(file)->i_sb,
1412 SB_FREEZE_WRITE, true);
1413 __sb_writers_release(file_inode(file)->i_sb,
1414 SB_FREEZE_WRITE);
1415 }
1416 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01001417
1418 ret2 = call_write_iter(file, kiocb, &iter);
1419 if (!force_nonblock || ret2 != -EAGAIN) {
1420 io_rw_done(kiocb, ret2);
1421 } else {
1422 /*
1423 * If ->needs_lock is true, we're already in async
1424 * context.
1425 */
1426 if (!s->needs_lock)
1427 io_async_list_note(WRITE, req, iov_count);
1428 ret = -EAGAIN;
1429 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001430 }
Jens Axboe31b51512019-01-18 22:56:34 -07001431out_free:
Jens Axboe2b188cc2019-01-07 10:46:33 -07001432 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001433 return ret;
1434}
1435
1436/*
1437 * IORING_OP_NOP just posts a completion event, nothing else.
1438 */
1439static int io_nop(struct io_kiocb *req, u64 user_data)
1440{
1441 struct io_ring_ctx *ctx = req->ctx;
1442 long err = 0;
1443
Jens Axboedef596e2019-01-09 08:59:42 -07001444 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1445 return -EINVAL;
1446
Jens Axboec71ffb62019-05-13 20:58:29 -06001447 io_cqring_add_event(ctx, user_data, err);
Jens Axboee65ef562019-03-12 10:16:44 -06001448 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001449 return 0;
1450}
1451
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001452static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1453{
Jens Axboe6b063142019-01-10 22:13:58 -07001454 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001455
Jens Axboe09bb8392019-03-13 12:39:28 -06001456 if (!req->file)
1457 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001458
Jens Axboe6b063142019-01-10 22:13:58 -07001459 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07001460 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07001461 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001462 return -EINVAL;
1463
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001464 return 0;
1465}
1466
1467static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1468 bool force_nonblock)
1469{
1470 loff_t sqe_off = READ_ONCE(sqe->off);
1471 loff_t sqe_len = READ_ONCE(sqe->len);
1472 loff_t end = sqe_off + sqe_len;
1473 unsigned fsync_flags;
1474 int ret;
1475
1476 fsync_flags = READ_ONCE(sqe->fsync_flags);
1477 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1478 return -EINVAL;
1479
1480 ret = io_prep_fsync(req, sqe);
1481 if (ret)
1482 return ret;
1483
1484 /* fsync always requires a blocking context */
1485 if (force_nonblock)
1486 return -EAGAIN;
1487
1488 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1489 end > 0 ? end : LLONG_MAX,
1490 fsync_flags & IORING_FSYNC_DATASYNC);
1491
Jens Axboe9e645e112019-05-10 16:07:28 -06001492 if (ret < 0 && (req->flags & REQ_F_LINK))
1493 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -06001494 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06001495 io_put_req(req);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001496 return 0;
1497}
1498
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001499static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1500{
1501 struct io_ring_ctx *ctx = req->ctx;
1502 int ret = 0;
1503
1504 if (!req->file)
1505 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001506
1507 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1508 return -EINVAL;
1509 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1510 return -EINVAL;
1511
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001512 return ret;
1513}
1514
1515static int io_sync_file_range(struct io_kiocb *req,
1516 const struct io_uring_sqe *sqe,
1517 bool force_nonblock)
1518{
1519 loff_t sqe_off;
1520 loff_t sqe_len;
1521 unsigned flags;
1522 int ret;
1523
1524 ret = io_prep_sfr(req, sqe);
1525 if (ret)
1526 return ret;
1527
1528 /* sync_file_range always requires a blocking context */
1529 if (force_nonblock)
1530 return -EAGAIN;
1531
1532 sqe_off = READ_ONCE(sqe->off);
1533 sqe_len = READ_ONCE(sqe->len);
1534 flags = READ_ONCE(sqe->sync_range_flags);
1535
1536 ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1537
Jens Axboe9e645e112019-05-10 16:07:28 -06001538 if (ret < 0 && (req->flags & REQ_F_LINK))
1539 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -06001540 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001541 io_put_req(req);
1542 return 0;
1543}
1544
Jens Axboe0fa03c62019-04-19 13:34:07 -06001545#if defined(CONFIG_NET)
Jens Axboeaa1fa282019-04-19 13:38:09 -06001546static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1547 bool force_nonblock,
1548 long (*fn)(struct socket *, struct user_msghdr __user *,
1549 unsigned int))
1550{
Jens Axboe0fa03c62019-04-19 13:34:07 -06001551 struct socket *sock;
1552 int ret;
1553
1554 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1555 return -EINVAL;
1556
1557 sock = sock_from_file(req->file, &ret);
1558 if (sock) {
1559 struct user_msghdr __user *msg;
1560 unsigned flags;
1561
1562 flags = READ_ONCE(sqe->msg_flags);
1563 if (flags & MSG_DONTWAIT)
1564 req->flags |= REQ_F_NOWAIT;
1565 else if (force_nonblock)
1566 flags |= MSG_DONTWAIT;
1567
1568 msg = (struct user_msghdr __user *) (unsigned long)
1569 READ_ONCE(sqe->addr);
1570
Jens Axboeaa1fa282019-04-19 13:38:09 -06001571 ret = fn(sock, msg, flags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001572 if (force_nonblock && ret == -EAGAIN)
1573 return ret;
1574 }
1575
1576 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1577 io_put_req(req);
1578 return 0;
Jens Axboeaa1fa282019-04-19 13:38:09 -06001579}
1580#endif
1581
1582static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1583 bool force_nonblock)
1584{
1585#if defined(CONFIG_NET)
1586 return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
1587#else
1588 return -EOPNOTSUPP;
1589#endif
1590}
1591
1592static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1593 bool force_nonblock)
1594{
1595#if defined(CONFIG_NET)
1596 return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001597#else
1598 return -EOPNOTSUPP;
1599#endif
1600}
1601
Jens Axboe221c5eb2019-01-17 09:41:58 -07001602static void io_poll_remove_one(struct io_kiocb *req)
1603{
1604 struct io_poll_iocb *poll = &req->poll;
1605
1606 spin_lock(&poll->head->lock);
1607 WRITE_ONCE(poll->canceled, true);
1608 if (!list_empty(&poll->wait.entry)) {
1609 list_del_init(&poll->wait.entry);
Jens Axboe18d9be12019-09-10 09:13:05 -06001610 io_queue_async_work(req->ctx, req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001611 }
1612 spin_unlock(&poll->head->lock);
1613
1614 list_del_init(&req->list);
1615}
1616
1617static void io_poll_remove_all(struct io_ring_ctx *ctx)
1618{
1619 struct io_kiocb *req;
1620
1621 spin_lock_irq(&ctx->completion_lock);
1622 while (!list_empty(&ctx->cancel_list)) {
1623 req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
1624 io_poll_remove_one(req);
1625 }
1626 spin_unlock_irq(&ctx->completion_lock);
1627}
1628
1629/*
1630 * Find a running poll command that matches one specified in sqe->addr,
1631 * and remove it if found.
1632 */
1633static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1634{
1635 struct io_ring_ctx *ctx = req->ctx;
1636 struct io_kiocb *poll_req, *next;
1637 int ret = -ENOENT;
1638
1639 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1640 return -EINVAL;
1641 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1642 sqe->poll_events)
1643 return -EINVAL;
1644
1645 spin_lock_irq(&ctx->completion_lock);
1646 list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1647 if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1648 io_poll_remove_one(poll_req);
1649 ret = 0;
1650 break;
1651 }
1652 }
1653 spin_unlock_irq(&ctx->completion_lock);
1654
Jens Axboec71ffb62019-05-13 20:58:29 -06001655 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06001656 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001657 return 0;
1658}
1659
Jens Axboe8c838782019-03-12 15:48:16 -06001660static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1661 __poll_t mask)
Jens Axboe221c5eb2019-01-17 09:41:58 -07001662{
Jens Axboe8c838782019-03-12 15:48:16 -06001663 req->poll.done = true;
Jens Axboec71ffb62019-05-13 20:58:29 -06001664 io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
Jens Axboe8c838782019-03-12 15:48:16 -06001665 io_commit_cqring(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001666}
1667
1668static void io_poll_complete_work(struct work_struct *work)
1669{
1670 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1671 struct io_poll_iocb *poll = &req->poll;
1672 struct poll_table_struct pt = { ._key = poll->events };
1673 struct io_ring_ctx *ctx = req->ctx;
1674 __poll_t mask = 0;
1675
1676 if (!READ_ONCE(poll->canceled))
1677 mask = vfs_poll(poll->file, &pt) & poll->events;
1678
1679 /*
1680 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1681 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1682 * synchronize with them. In the cancellation case the list_del_init
1683 * itself is not actually needed, but harmless so we keep it in to
1684 * avoid further branches in the fast path.
1685 */
1686 spin_lock_irq(&ctx->completion_lock);
1687 if (!mask && !READ_ONCE(poll->canceled)) {
1688 add_wait_queue(poll->head, &poll->wait);
1689 spin_unlock_irq(&ctx->completion_lock);
1690 return;
1691 }
1692 list_del_init(&req->list);
Jens Axboe8c838782019-03-12 15:48:16 -06001693 io_poll_complete(ctx, req, mask);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001694 spin_unlock_irq(&ctx->completion_lock);
1695
Jens Axboe8c838782019-03-12 15:48:16 -06001696 io_cqring_ev_posted(ctx);
1697 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001698}
1699
1700static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1701 void *key)
1702{
1703 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1704 wait);
1705 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1706 struct io_ring_ctx *ctx = req->ctx;
1707 __poll_t mask = key_to_poll(key);
Jens Axboe8c838782019-03-12 15:48:16 -06001708 unsigned long flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001709
1710 /* for instances that support it check for an event match first: */
Jens Axboe8c838782019-03-12 15:48:16 -06001711 if (mask && !(mask & poll->events))
1712 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001713
1714 list_del_init(&poll->wait.entry);
Jens Axboe8c838782019-03-12 15:48:16 -06001715
1716 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1717 list_del(&req->list);
1718 io_poll_complete(ctx, req, mask);
1719 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1720
1721 io_cqring_ev_posted(ctx);
1722 io_put_req(req);
1723 } else {
Jens Axboe18d9be12019-09-10 09:13:05 -06001724 io_queue_async_work(ctx, req);
Jens Axboe8c838782019-03-12 15:48:16 -06001725 }
1726
Jens Axboe221c5eb2019-01-17 09:41:58 -07001727 return 1;
1728}
1729
1730struct io_poll_table {
1731 struct poll_table_struct pt;
1732 struct io_kiocb *req;
1733 int error;
1734};
1735
1736static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1737 struct poll_table_struct *p)
1738{
1739 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1740
1741 if (unlikely(pt->req->poll.head)) {
1742 pt->error = -EINVAL;
1743 return;
1744 }
1745
1746 pt->error = 0;
1747 pt->req->poll.head = head;
1748 add_wait_queue(head, &pt->req->poll.wait);
1749}
1750
1751static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1752{
1753 struct io_poll_iocb *poll = &req->poll;
1754 struct io_ring_ctx *ctx = req->ctx;
1755 struct io_poll_table ipt;
Jens Axboe8c838782019-03-12 15:48:16 -06001756 bool cancel = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001757 __poll_t mask;
1758 u16 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001759
1760 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1761 return -EINVAL;
1762 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1763 return -EINVAL;
Jens Axboe09bb8392019-03-13 12:39:28 -06001764 if (!poll->file)
1765 return -EBADF;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001766
Jens Axboe6cc47d12019-09-18 11:18:23 -06001767 req->submit.sqe = NULL;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001768 INIT_WORK(&req->work, io_poll_complete_work);
1769 events = READ_ONCE(sqe->poll_events);
1770 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1771
Jens Axboe221c5eb2019-01-17 09:41:58 -07001772 poll->head = NULL;
Jens Axboe8c838782019-03-12 15:48:16 -06001773 poll->done = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001774 poll->canceled = false;
1775
1776 ipt.pt._qproc = io_poll_queue_proc;
1777 ipt.pt._key = poll->events;
1778 ipt.req = req;
1779 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1780
1781 /* initialized the list so that we can do list_empty checks */
1782 INIT_LIST_HEAD(&poll->wait.entry);
1783 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1784
Jens Axboe36703242019-07-25 10:20:18 -06001785 INIT_LIST_HEAD(&req->list);
1786
Jens Axboe221c5eb2019-01-17 09:41:58 -07001787 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001788
1789 spin_lock_irq(&ctx->completion_lock);
Jens Axboe8c838782019-03-12 15:48:16 -06001790 if (likely(poll->head)) {
1791 spin_lock(&poll->head->lock);
1792 if (unlikely(list_empty(&poll->wait.entry))) {
1793 if (ipt.error)
1794 cancel = true;
1795 ipt.error = 0;
1796 mask = 0;
1797 }
1798 if (mask || ipt.error)
1799 list_del_init(&poll->wait.entry);
1800 else if (cancel)
1801 WRITE_ONCE(poll->canceled, true);
1802 else if (!poll->done) /* actually waiting for an event */
1803 list_add_tail(&req->list, &ctx->cancel_list);
1804 spin_unlock(&poll->head->lock);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001805 }
Jens Axboe8c838782019-03-12 15:48:16 -06001806 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06001807 ipt.error = 0;
1808 io_poll_complete(ctx, req, mask);
1809 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07001810 spin_unlock_irq(&ctx->completion_lock);
1811
Jens Axboe8c838782019-03-12 15:48:16 -06001812 if (mask) {
1813 io_cqring_ev_posted(ctx);
Jens Axboee65ef562019-03-12 10:16:44 -06001814 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001815 }
Jens Axboe8c838782019-03-12 15:48:16 -06001816 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001817}
1818
Jens Axboe5262f562019-09-17 12:26:57 -06001819static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1820{
1821 struct io_ring_ctx *ctx;
1822 struct io_kiocb *req;
1823 unsigned long flags;
1824
1825 req = container_of(timer, struct io_kiocb, timeout.timer);
1826 ctx = req->ctx;
1827 atomic_inc(&ctx->cq_timeouts);
1828
1829 spin_lock_irqsave(&ctx->completion_lock, flags);
1830 list_del(&req->list);
1831
1832 io_cqring_fill_event(ctx, req->user_data, -ETIME);
1833 io_commit_cqring(ctx);
1834 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1835
1836 io_cqring_ev_posted(ctx);
1837
1838 io_put_req(req);
1839 return HRTIMER_NORESTART;
1840}
1841
1842static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1843{
1844 unsigned count, req_dist, tail_index;
1845 struct io_ring_ctx *ctx = req->ctx;
1846 struct list_head *entry;
1847 struct timespec ts;
1848
1849 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1850 return -EINVAL;
1851 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
1852 sqe->len != 1)
1853 return -EINVAL;
1854 if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr,
1855 sizeof(ts)))
1856 return -EFAULT;
1857
1858 /*
1859 * sqe->off holds how many events that need to occur for this
1860 * timeout event to be satisfied.
1861 */
1862 count = READ_ONCE(sqe->off);
1863 if (!count)
1864 count = 1;
1865
1866 req->sequence = ctx->cached_sq_head + count - 1;
1867 req->flags |= REQ_F_TIMEOUT;
1868
1869 /*
1870 * Insertion sort, ensuring the first entry in the list is always
1871 * the one we need first.
1872 */
1873 tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
1874 req_dist = req->sequence - tail_index;
1875 spin_lock_irq(&ctx->completion_lock);
1876 list_for_each_prev(entry, &ctx->timeout_list) {
1877 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
1878 unsigned dist;
1879
1880 dist = nxt->sequence - tail_index;
1881 if (req_dist >= dist)
1882 break;
1883 }
1884 list_add(&req->list, entry);
1885 spin_unlock_irq(&ctx->completion_lock);
1886
1887 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1888 req->timeout.timer.function = io_timeout_fn;
1889 hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts),
1890 HRTIMER_MODE_REL);
1891 return 0;
1892}
1893
Jens Axboede0617e2019-04-06 21:51:27 -06001894static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
1895 const struct io_uring_sqe *sqe)
1896{
1897 struct io_uring_sqe *sqe_copy;
1898
1899 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
1900 return 0;
1901
1902 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
1903 if (!sqe_copy)
1904 return -EAGAIN;
1905
1906 spin_lock_irq(&ctx->completion_lock);
1907 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
1908 spin_unlock_irq(&ctx->completion_lock);
1909 kfree(sqe_copy);
1910 return 0;
1911 }
1912
1913 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
1914 req->submit.sqe = sqe_copy;
1915
1916 INIT_WORK(&req->work, io_sq_wq_submit_work);
1917 list_add_tail(&req->list, &ctx->defer_list);
1918 spin_unlock_irq(&ctx->completion_lock);
1919 return -EIOCBQUEUED;
1920}
1921
Jens Axboe2b188cc2019-01-07 10:46:33 -07001922static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001923 const struct sqe_submit *s, bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001924{
Jens Axboee0c5c572019-03-12 10:18:47 -06001925 int ret, opcode;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001926
Jens Axboe9e645e112019-05-10 16:07:28 -06001927 req->user_data = READ_ONCE(s->sqe->user_data);
1928
Jens Axboe2b188cc2019-01-07 10:46:33 -07001929 if (unlikely(s->index >= ctx->sq_entries))
1930 return -EINVAL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001931
1932 opcode = READ_ONCE(s->sqe->opcode);
1933 switch (opcode) {
1934 case IORING_OP_NOP:
1935 ret = io_nop(req, req->user_data);
1936 break;
1937 case IORING_OP_READV:
Jens Axboeedafcce2019-01-09 09:16:05 -07001938 if (unlikely(s->sqe->buf_index))
1939 return -EINVAL;
Jens Axboe8358e3a2019-04-23 08:17:58 -06001940 ret = io_read(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001941 break;
1942 case IORING_OP_WRITEV:
Jens Axboeedafcce2019-01-09 09:16:05 -07001943 if (unlikely(s->sqe->buf_index))
1944 return -EINVAL;
Jens Axboe8358e3a2019-04-23 08:17:58 -06001945 ret = io_write(req, s, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07001946 break;
1947 case IORING_OP_READ_FIXED:
Jens Axboe8358e3a2019-04-23 08:17:58 -06001948 ret = io_read(req, s, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07001949 break;
1950 case IORING_OP_WRITE_FIXED:
Jens Axboe8358e3a2019-04-23 08:17:58 -06001951 ret = io_write(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001952 break;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001953 case IORING_OP_FSYNC:
1954 ret = io_fsync(req, s->sqe, force_nonblock);
1955 break;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001956 case IORING_OP_POLL_ADD:
1957 ret = io_poll_add(req, s->sqe);
1958 break;
1959 case IORING_OP_POLL_REMOVE:
1960 ret = io_poll_remove(req, s->sqe);
1961 break;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001962 case IORING_OP_SYNC_FILE_RANGE:
1963 ret = io_sync_file_range(req, s->sqe, force_nonblock);
1964 break;
Jens Axboe0fa03c62019-04-19 13:34:07 -06001965 case IORING_OP_SENDMSG:
1966 ret = io_sendmsg(req, s->sqe, force_nonblock);
1967 break;
Jens Axboeaa1fa282019-04-19 13:38:09 -06001968 case IORING_OP_RECVMSG:
1969 ret = io_recvmsg(req, s->sqe, force_nonblock);
1970 break;
Jens Axboe5262f562019-09-17 12:26:57 -06001971 case IORING_OP_TIMEOUT:
1972 ret = io_timeout(req, s->sqe);
1973 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001974 default:
1975 ret = -EINVAL;
1976 break;
1977 }
1978
Jens Axboedef596e2019-01-09 08:59:42 -07001979 if (ret)
1980 return ret;
1981
1982 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe9e645e112019-05-10 16:07:28 -06001983 if (req->result == -EAGAIN)
Jens Axboedef596e2019-01-09 08:59:42 -07001984 return -EAGAIN;
1985
1986 /* workqueue context doesn't hold uring_lock, grab it now */
1987 if (s->needs_lock)
1988 mutex_lock(&ctx->uring_lock);
1989 io_iopoll_req_issued(req);
1990 if (s->needs_lock)
1991 mutex_unlock(&ctx->uring_lock);
1992 }
1993
1994 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001995}
1996
Jens Axboe31b51512019-01-18 22:56:34 -07001997static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
1998 const struct io_uring_sqe *sqe)
1999{
2000 switch (sqe->opcode) {
2001 case IORING_OP_READV:
2002 case IORING_OP_READ_FIXED:
2003 return &ctx->pending_async[READ];
2004 case IORING_OP_WRITEV:
2005 case IORING_OP_WRITE_FIXED:
2006 return &ctx->pending_async[WRITE];
2007 default:
2008 return NULL;
2009 }
2010}
2011
Jens Axboeedafcce2019-01-09 09:16:05 -07002012static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
2013{
2014 u8 opcode = READ_ONCE(sqe->opcode);
2015
2016 return !(opcode == IORING_OP_READ_FIXED ||
2017 opcode == IORING_OP_WRITE_FIXED);
2018}
2019
Jens Axboe2b188cc2019-01-07 10:46:33 -07002020static void io_sq_wq_submit_work(struct work_struct *work)
2021{
2022 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002023 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe31b51512019-01-18 22:56:34 -07002024 struct mm_struct *cur_mm = NULL;
2025 struct async_list *async_list;
2026 LIST_HEAD(req_list);
Jens Axboeedafcce2019-01-09 09:16:05 -07002027 mm_segment_t old_fs;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002028 int ret;
2029
Jens Axboe31b51512019-01-18 22:56:34 -07002030 async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
2031restart:
2032 do {
2033 struct sqe_submit *s = &req->submit;
2034 const struct io_uring_sqe *sqe = s->sqe;
Jackie Liud0ee8792019-07-31 14:39:33 +08002035 unsigned int flags = req->flags;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002036
Stefan Bühler8449eed2019-04-27 20:34:19 +02002037 /* Ensure we clear previously set non-block flag */
Jens Axboe31b51512019-01-18 22:56:34 -07002038 req->rw.ki_flags &= ~IOCB_NOWAIT;
2039
2040 ret = 0;
2041 if (io_sqe_needs_user(sqe) && !cur_mm) {
2042 if (!mmget_not_zero(ctx->sqo_mm)) {
2043 ret = -EFAULT;
2044 } else {
2045 cur_mm = ctx->sqo_mm;
2046 use_mm(cur_mm);
2047 old_fs = get_fs();
2048 set_fs(USER_DS);
2049 }
2050 }
2051
2052 if (!ret) {
2053 s->has_user = cur_mm != NULL;
2054 s->needs_lock = true;
2055 do {
Jens Axboe8358e3a2019-04-23 08:17:58 -06002056 ret = __io_submit_sqe(ctx, req, s, false);
Jens Axboe31b51512019-01-18 22:56:34 -07002057 /*
2058 * We can get EAGAIN for polled IO even though
2059 * we're forcing a sync submission from here,
2060 * since we can't wait for request slots on the
2061 * block side.
2062 */
2063 if (ret != -EAGAIN)
2064 break;
2065 cond_resched();
2066 } while (1);
2067 }
Jens Axboe817869d2019-04-30 14:44:05 -06002068
2069 /* drop submission reference */
2070 io_put_req(req);
2071
Jens Axboe31b51512019-01-18 22:56:34 -07002072 if (ret) {
Jens Axboec71ffb62019-05-13 20:58:29 -06002073 io_cqring_add_event(ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06002074 io_put_req(req);
Jens Axboe31b51512019-01-18 22:56:34 -07002075 }
2076
2077 /* async context always use a copy of the sqe */
2078 kfree(sqe);
2079
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08002080 /* req from defer and link list needn't decrease async cnt */
Jackie Liud0ee8792019-07-31 14:39:33 +08002081 if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08002082 goto out;
2083
Jens Axboe31b51512019-01-18 22:56:34 -07002084 if (!async_list)
2085 break;
2086 if (!list_empty(&req_list)) {
2087 req = list_first_entry(&req_list, struct io_kiocb,
2088 list);
2089 list_del(&req->list);
2090 continue;
2091 }
2092 if (list_empty(&async_list->list))
2093 break;
2094
2095 req = NULL;
2096 spin_lock(&async_list->lock);
2097 if (list_empty(&async_list->list)) {
2098 spin_unlock(&async_list->lock);
2099 break;
2100 }
2101 list_splice_init(&async_list->list, &req_list);
2102 spin_unlock(&async_list->lock);
2103
2104 req = list_first_entry(&req_list, struct io_kiocb, list);
2105 list_del(&req->list);
2106 } while (req);
Jens Axboeedafcce2019-01-09 09:16:05 -07002107
2108 /*
Jens Axboe31b51512019-01-18 22:56:34 -07002109 * Rare case of racing with a submitter. If we find the count has
2110 * dropped to zero AND we have pending work items, then restart
2111 * the processing. This is a tiny race window.
Jens Axboeedafcce2019-01-09 09:16:05 -07002112 */
Jens Axboe31b51512019-01-18 22:56:34 -07002113 if (async_list) {
2114 ret = atomic_dec_return(&async_list->cnt);
2115 while (!ret && !list_empty(&async_list->list)) {
2116 spin_lock(&async_list->lock);
2117 atomic_inc(&async_list->cnt);
2118 list_splice_init(&async_list->list, &req_list);
2119 spin_unlock(&async_list->lock);
2120
2121 if (!list_empty(&req_list)) {
2122 req = list_first_entry(&req_list,
2123 struct io_kiocb, list);
2124 list_del(&req->list);
2125 goto restart;
2126 }
2127 ret = atomic_dec_return(&async_list->cnt);
Jens Axboeedafcce2019-01-09 09:16:05 -07002128 }
Jens Axboeedafcce2019-01-09 09:16:05 -07002129 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002130
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08002131out:
Jens Axboe31b51512019-01-18 22:56:34 -07002132 if (cur_mm) {
Jens Axboeedafcce2019-01-09 09:16:05 -07002133 set_fs(old_fs);
Jens Axboe31b51512019-01-18 22:56:34 -07002134 unuse_mm(cur_mm);
2135 mmput(cur_mm);
Jens Axboeedafcce2019-01-09 09:16:05 -07002136 }
Jens Axboe31b51512019-01-18 22:56:34 -07002137}
Jens Axboe2b188cc2019-01-07 10:46:33 -07002138
Jens Axboe31b51512019-01-18 22:56:34 -07002139/*
2140 * See if we can piggy back onto previously submitted work, that is still
2141 * running. We currently only allow this if the new request is sequential
2142 * to the previous one we punted.
2143 */
2144static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
2145{
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06002146 bool ret;
Jens Axboe31b51512019-01-18 22:56:34 -07002147
2148 if (!list)
2149 return false;
2150 if (!(req->flags & REQ_F_SEQ_PREV))
2151 return false;
2152 if (!atomic_read(&list->cnt))
2153 return false;
2154
2155 ret = true;
2156 spin_lock(&list->lock);
2157 list_add_tail(&req->list, &list->list);
Zhengyuan Liuc0e48f92019-07-18 20:44:00 +08002158 /*
2159 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
2160 */
2161 smp_mb();
Jens Axboe31b51512019-01-18 22:56:34 -07002162 if (!atomic_read(&list->cnt)) {
2163 list_del_init(&req->list);
2164 ret = false;
2165 }
2166 spin_unlock(&list->lock);
2167 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002168}
2169
Jens Axboe09bb8392019-03-13 12:39:28 -06002170static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2171{
2172 int op = READ_ONCE(sqe->opcode);
2173
2174 switch (op) {
2175 case IORING_OP_NOP:
2176 case IORING_OP_POLL_REMOVE:
2177 return false;
2178 default:
2179 return true;
2180 }
2181}
2182
2183static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
2184 struct io_submit_state *state, struct io_kiocb *req)
2185{
2186 unsigned flags;
2187 int fd;
2188
2189 flags = READ_ONCE(s->sqe->flags);
2190 fd = READ_ONCE(s->sqe->fd);
2191
Jackie Liu4fe2c962019-09-09 20:50:40 +08002192 if (flags & IOSQE_IO_DRAIN)
Jens Axboede0617e2019-04-06 21:51:27 -06002193 req->flags |= REQ_F_IO_DRAIN;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002194 /*
2195 * All io need record the previous position, if LINK vs DARIN,
2196 * it can be used to mark the position of the first IO in the
2197 * link list.
2198 */
2199 req->sequence = s->sequence;
Jens Axboede0617e2019-04-06 21:51:27 -06002200
Jens Axboe60c112b2019-06-21 10:20:18 -06002201 if (!io_op_needs_file(s->sqe))
Jens Axboe09bb8392019-03-13 12:39:28 -06002202 return 0;
Jens Axboe09bb8392019-03-13 12:39:28 -06002203
2204 if (flags & IOSQE_FIXED_FILE) {
2205 if (unlikely(!ctx->user_files ||
2206 (unsigned) fd >= ctx->nr_user_files))
2207 return -EBADF;
2208 req->file = ctx->user_files[fd];
2209 req->flags |= REQ_F_FIXED_FILE;
2210 } else {
2211 if (s->needs_fixed_file)
2212 return -EBADF;
2213 req->file = io_file_get(state, fd);
2214 if (unlikely(!req->file))
2215 return -EBADF;
2216 }
2217
2218 return 0;
2219}
2220
Jackie Liu4fe2c962019-09-09 20:50:40 +08002221static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboec57666682019-09-09 16:19:45 -06002222 struct sqe_submit *s, bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002223{
Jens Axboee0c5c572019-03-12 10:18:47 -06002224 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002225
Jens Axboec57666682019-09-09 16:19:45 -06002226 ret = __io_submit_sqe(ctx, req, s, force_nonblock);
Stefan Bühler8449eed2019-04-27 20:34:19 +02002227 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07002228 struct io_uring_sqe *sqe_copy;
2229
Jackie Liu954dab12019-09-18 10:37:52 +08002230 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002231 if (sqe_copy) {
Jens Axboe31b51512019-01-18 22:56:34 -07002232 struct async_list *list;
2233
Jens Axboe2b188cc2019-01-07 10:46:33 -07002234 s->sqe = sqe_copy;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002235 memcpy(&req->submit, s, sizeof(*s));
Jens Axboe31b51512019-01-18 22:56:34 -07002236 list = io_async_list_from_sqe(ctx, s->sqe);
2237 if (!io_add_to_prev_work(list, req)) {
2238 if (list)
2239 atomic_inc(&list->cnt);
2240 INIT_WORK(&req->work, io_sq_wq_submit_work);
Jens Axboe18d9be12019-09-10 09:13:05 -06002241 io_queue_async_work(ctx, req);
Jens Axboe31b51512019-01-18 22:56:34 -07002242 }
Jens Axboee65ef562019-03-12 10:16:44 -06002243
2244 /*
2245 * Queued up for async execution, worker will release
Jens Axboe9e645e112019-05-10 16:07:28 -06002246 * submit reference when the iocb is actually submitted.
Jens Axboee65ef562019-03-12 10:16:44 -06002247 */
2248 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002249 }
2250 }
Jens Axboee65ef562019-03-12 10:16:44 -06002251
2252 /* drop submission reference */
2253 io_put_req(req);
2254
2255 /* and drop final reference, if we failed */
Jens Axboe9e645e112019-05-10 16:07:28 -06002256 if (ret) {
2257 io_cqring_add_event(ctx, req->user_data, ret);
2258 if (req->flags & REQ_F_LINK)
2259 req->flags |= REQ_F_FAIL_LINK;
Jens Axboee65ef562019-03-12 10:16:44 -06002260 io_put_req(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002261 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002262
2263 return ret;
2264}
2265
Jackie Liu4fe2c962019-09-09 20:50:40 +08002266static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboec57666682019-09-09 16:19:45 -06002267 struct sqe_submit *s, bool force_nonblock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08002268{
2269 int ret;
2270
2271 ret = io_req_defer(ctx, req, s->sqe);
2272 if (ret) {
2273 if (ret != -EIOCBQUEUED) {
2274 io_free_req(req);
2275 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2276 }
2277 return 0;
2278 }
2279
Jens Axboec57666682019-09-09 16:19:45 -06002280 return __io_queue_sqe(ctx, req, s, force_nonblock);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002281}
2282
2283static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboec57666682019-09-09 16:19:45 -06002284 struct sqe_submit *s, struct io_kiocb *shadow,
2285 bool force_nonblock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08002286{
2287 int ret;
2288 int need_submit = false;
2289
2290 if (!shadow)
Jens Axboec57666682019-09-09 16:19:45 -06002291 return io_queue_sqe(ctx, req, s, force_nonblock);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002292
2293 /*
2294 * Mark the first IO in link list as DRAIN, let all the following
2295 * IOs enter the defer list. all IO needs to be completed before link
2296 * list.
2297 */
2298 req->flags |= REQ_F_IO_DRAIN;
2299 ret = io_req_defer(ctx, req, s->sqe);
2300 if (ret) {
2301 if (ret != -EIOCBQUEUED) {
2302 io_free_req(req);
2303 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2304 return 0;
2305 }
2306 } else {
2307 /*
2308 * If ret == 0 means that all IOs in front of link io are
2309 * running done. let's queue link head.
2310 */
2311 need_submit = true;
2312 }
2313
2314 /* Insert shadow req to defer_list, blocking next IOs */
2315 spin_lock_irq(&ctx->completion_lock);
2316 list_add_tail(&shadow->list, &ctx->defer_list);
2317 spin_unlock_irq(&ctx->completion_lock);
2318
2319 if (need_submit)
Jens Axboec57666682019-09-09 16:19:45 -06002320 return __io_queue_sqe(ctx, req, s, force_nonblock);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002321
2322 return 0;
2323}
2324
Jens Axboe9e645e112019-05-10 16:07:28 -06002325#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2326
2327static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
Jens Axboec57666682019-09-09 16:19:45 -06002328 struct io_submit_state *state, struct io_kiocb **link,
2329 bool force_nonblock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002330{
2331 struct io_uring_sqe *sqe_copy;
2332 struct io_kiocb *req;
2333 int ret;
2334
2335 /* enforce forwards compatibility on users */
2336 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2337 ret = -EINVAL;
2338 goto err;
2339 }
2340
2341 req = io_get_req(ctx, state);
2342 if (unlikely(!req)) {
2343 ret = -EAGAIN;
2344 goto err;
2345 }
2346
2347 ret = io_req_set_file(ctx, s, state, req);
2348 if (unlikely(ret)) {
2349err_req:
2350 io_free_req(req);
2351err:
2352 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2353 return;
2354 }
2355
Jens Axboe9e645e112019-05-10 16:07:28 -06002356 /*
2357 * If we already have a head request, queue this one for async
2358 * submittal once the head completes. If we don't have a head but
2359 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2360 * submitted sync once the chain is complete. If none of those
2361 * conditions are true (normal request), then just queue it.
2362 */
2363 if (*link) {
2364 struct io_kiocb *prev = *link;
2365
2366 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2367 if (!sqe_copy) {
2368 ret = -EAGAIN;
2369 goto err_req;
2370 }
2371
2372 s->sqe = sqe_copy;
2373 memcpy(&req->submit, s, sizeof(*s));
2374 list_add_tail(&req->list, &prev->link_list);
2375 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2376 req->flags |= REQ_F_LINK;
2377
2378 memcpy(&req->submit, s, sizeof(*s));
2379 INIT_LIST_HEAD(&req->link_list);
2380 *link = req;
2381 } else {
Jens Axboec57666682019-09-09 16:19:45 -06002382 io_queue_sqe(ctx, req, s, force_nonblock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002383 }
2384}
2385
Jens Axboe9a56a232019-01-09 09:06:50 -07002386/*
2387 * Batched submission is done, ensure local IO is flushed out.
2388 */
2389static void io_submit_state_end(struct io_submit_state *state)
2390{
2391 blk_finish_plug(&state->plug);
Jens Axboe3d6770f2019-04-13 11:50:54 -06002392 io_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07002393 if (state->free_reqs)
2394 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2395 &state->reqs[state->cur_req]);
Jens Axboe9a56a232019-01-09 09:06:50 -07002396}
2397
2398/*
2399 * Start submission side cache.
2400 */
2401static void io_submit_state_start(struct io_submit_state *state,
2402 struct io_ring_ctx *ctx, unsigned max_ios)
2403{
2404 blk_start_plug(&state->plug);
Jens Axboe2579f912019-01-09 09:10:43 -07002405 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07002406 state->file = NULL;
2407 state->ios_left = max_ios;
2408}
2409
Jens Axboe2b188cc2019-01-07 10:46:33 -07002410static void io_commit_sqring(struct io_ring_ctx *ctx)
2411{
Hristo Venev75b28af2019-08-26 17:23:46 +00002412 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002413
Hristo Venev75b28af2019-08-26 17:23:46 +00002414 if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07002415 /*
2416 * Ensure any loads from the SQEs are done at this point,
2417 * since once we write the new head, the application could
2418 * write new data to them.
2419 */
Hristo Venev75b28af2019-08-26 17:23:46 +00002420 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002421 }
2422}
2423
2424/*
Jens Axboe2b188cc2019-01-07 10:46:33 -07002425 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2426 * that is mapped by userspace. This means that care needs to be taken to
2427 * ensure that reads are stable, as we cannot rely on userspace always
2428 * being a good citizen. If members of the sqe are validated and then later
2429 * used, it's important that those reads are done through READ_ONCE() to
2430 * prevent a re-load down the line.
2431 */
2432static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2433{
Hristo Venev75b28af2019-08-26 17:23:46 +00002434 struct io_rings *rings = ctx->rings;
2435 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002436 unsigned head;
2437
2438 /*
2439 * The cached sq head (or cq tail) serves two purposes:
2440 *
2441 * 1) allows us to batch the cost of updating the user visible
2442 * head updates.
2443 * 2) allows the kernel side to track the head on its own, even
2444 * though the application is the one updating it.
2445 */
2446 head = ctx->cached_sq_head;
Stefan Bühlere523a292019-04-19 11:57:44 +02002447 /* make sure SQ entry isn't read before tail */
Hristo Venev75b28af2019-08-26 17:23:46 +00002448 if (head == smp_load_acquire(&rings->sq.tail))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002449 return false;
2450
Hristo Venev75b28af2019-08-26 17:23:46 +00002451 head = READ_ONCE(sq_array[head & ctx->sq_mask]);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002452 if (head < ctx->sq_entries) {
2453 s->index = head;
2454 s->sqe = &ctx->sq_sqes[head];
Jackie Liu8776f3f2019-09-09 20:50:39 +08002455 s->sequence = ctx->cached_sq_head;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002456 ctx->cached_sq_head++;
2457 return true;
2458 }
2459
2460 /* drop invalid entries */
2461 ctx->cached_sq_head++;
Hristo Venev75b28af2019-08-26 17:23:46 +00002462 rings->sq_dropped++;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002463 return false;
2464}
2465
Jens Axboe6c271ce2019-01-10 11:22:30 -07002466static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
2467 unsigned int nr, bool has_user, bool mm_fault)
2468{
2469 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002470 struct io_kiocb *link = NULL;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002471 struct io_kiocb *shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002472 bool prev_was_link = false;
2473 int i, submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002474
2475 if (nr > IO_PLUG_THRESHOLD) {
2476 io_submit_state_start(&state, ctx, nr);
2477 statep = &state;
2478 }
2479
2480 for (i = 0; i < nr; i++) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002481 /*
2482 * If previous wasn't linked and we have a linked command,
2483 * that's the end of the chain. Submit the previous link.
2484 */
2485 if (!prev_was_link && link) {
Jens Axboec57666682019-09-09 16:19:45 -06002486 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2487 true);
Jens Axboe9e645e112019-05-10 16:07:28 -06002488 link = NULL;
Jackie Liu5f5ad9c2019-09-18 10:37:53 +08002489 shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002490 }
2491 prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
2492
Jackie Liu4fe2c962019-09-09 20:50:40 +08002493 if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
2494 if (!shadow_req) {
2495 shadow_req = io_get_req(ctx, NULL);
Jackie Liua1041c22019-09-18 17:25:52 +08002496 if (unlikely(!shadow_req))
2497 goto out;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002498 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2499 refcount_dec(&shadow_req->refs);
2500 }
2501 shadow_req->sequence = sqes[i].sequence;
2502 }
2503
Jackie Liua1041c22019-09-18 17:25:52 +08002504out:
Jens Axboe6c271ce2019-01-10 11:22:30 -07002505 if (unlikely(mm_fault)) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002506 io_cqring_add_event(ctx, sqes[i].sqe->user_data,
2507 -EFAULT);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002508 } else {
2509 sqes[i].has_user = has_user;
2510 sqes[i].needs_lock = true;
2511 sqes[i].needs_fixed_file = true;
Jens Axboec57666682019-09-09 16:19:45 -06002512 io_submit_sqe(ctx, &sqes[i], statep, &link, true);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002513 submitted++;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002514 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07002515 }
2516
Jens Axboe9e645e112019-05-10 16:07:28 -06002517 if (link)
Jens Axboec57666682019-09-09 16:19:45 -06002518 io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002519 if (statep)
2520 io_submit_state_end(&state);
2521
2522 return submitted;
2523}
2524
2525static int io_sq_thread(void *data)
2526{
2527 struct sqe_submit sqes[IO_IOPOLL_BATCH];
2528 struct io_ring_ctx *ctx = data;
2529 struct mm_struct *cur_mm = NULL;
2530 mm_segment_t old_fs;
2531 DEFINE_WAIT(wait);
2532 unsigned inflight;
2533 unsigned long timeout;
2534
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002535 complete(&ctx->sqo_thread_started);
2536
Jens Axboe6c271ce2019-01-10 11:22:30 -07002537 old_fs = get_fs();
2538 set_fs(USER_DS);
2539
2540 timeout = inflight = 0;
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002541 while (!kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002542 bool all_fixed, mm_fault = false;
2543 int i;
2544
2545 if (inflight) {
2546 unsigned nr_events = 0;
2547
2548 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002549 io_iopoll_check(ctx, &nr_events, 0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002550 } else {
2551 /*
2552 * Normal IO, just pretend everything completed.
2553 * We don't have to poll completions for that.
2554 */
2555 nr_events = inflight;
2556 }
2557
2558 inflight -= nr_events;
2559 if (!inflight)
2560 timeout = jiffies + ctx->sq_thread_idle;
2561 }
2562
2563 if (!io_get_sqring(ctx, &sqes[0])) {
2564 /*
2565 * We're polling. If we're within the defined idle
2566 * period, then let us spin without work before going
2567 * to sleep.
2568 */
2569 if (inflight || !time_after(jiffies, timeout)) {
Jens Axboe9831a902019-09-19 09:48:55 -06002570 cond_resched();
Jens Axboe6c271ce2019-01-10 11:22:30 -07002571 continue;
2572 }
2573
2574 /*
2575 * Drop cur_mm before scheduling, we can't hold it for
2576 * long periods (or over schedule()). Do this before
2577 * adding ourselves to the waitqueue, as the unuse/drop
2578 * may sleep.
2579 */
2580 if (cur_mm) {
2581 unuse_mm(cur_mm);
2582 mmput(cur_mm);
2583 cur_mm = NULL;
2584 }
2585
2586 prepare_to_wait(&ctx->sqo_wait, &wait,
2587 TASK_INTERRUPTIBLE);
2588
2589 /* Tell userspace we may need a wakeup call */
Hristo Venev75b28af2019-08-26 17:23:46 +00002590 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
Stefan Bühler0d7bae62019-04-19 11:57:45 +02002591 /* make sure to read SQ tail after writing flags */
2592 smp_mb();
Jens Axboe6c271ce2019-01-10 11:22:30 -07002593
2594 if (!io_get_sqring(ctx, &sqes[0])) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002595 if (kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002596 finish_wait(&ctx->sqo_wait, &wait);
2597 break;
2598 }
2599 if (signal_pending(current))
2600 flush_signals(current);
2601 schedule();
2602 finish_wait(&ctx->sqo_wait, &wait);
2603
Hristo Venev75b28af2019-08-26 17:23:46 +00002604 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002605 continue;
2606 }
2607 finish_wait(&ctx->sqo_wait, &wait);
2608
Hristo Venev75b28af2019-08-26 17:23:46 +00002609 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002610 }
2611
2612 i = 0;
2613 all_fixed = true;
2614 do {
2615 if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
2616 all_fixed = false;
2617
2618 i++;
2619 if (i == ARRAY_SIZE(sqes))
2620 break;
2621 } while (io_get_sqring(ctx, &sqes[i]));
2622
2623 /* Unless all new commands are FIXED regions, grab mm */
2624 if (!all_fixed && !cur_mm) {
2625 mm_fault = !mmget_not_zero(ctx->sqo_mm);
2626 if (!mm_fault) {
2627 use_mm(ctx->sqo_mm);
2628 cur_mm = ctx->sqo_mm;
2629 }
2630 }
2631
2632 inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
2633 mm_fault);
2634
2635 /* Commit SQ ring head once we've consumed all SQEs */
2636 io_commit_sqring(ctx);
2637 }
2638
2639 set_fs(old_fs);
2640 if (cur_mm) {
2641 unuse_mm(cur_mm);
2642 mmput(cur_mm);
2643 }
Jens Axboe06058632019-04-13 09:26:03 -06002644
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002645 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06002646
Jens Axboe6c271ce2019-01-10 11:22:30 -07002647 return 0;
2648}
2649
Jens Axboec57666682019-09-09 16:19:45 -06002650static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
2651 bool block_for_last)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002652{
Jens Axboe9a56a232019-01-09 09:06:50 -07002653 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002654 struct io_kiocb *link = NULL;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002655 struct io_kiocb *shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002656 bool prev_was_link = false;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002657 int i, submit = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002658
Jens Axboe9a56a232019-01-09 09:06:50 -07002659 if (to_submit > IO_PLUG_THRESHOLD) {
2660 io_submit_state_start(&state, ctx, to_submit);
2661 statep = &state;
2662 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002663
2664 for (i = 0; i < to_submit; i++) {
Jens Axboec57666682019-09-09 16:19:45 -06002665 bool force_nonblock = true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002666 struct sqe_submit s;
2667
2668 if (!io_get_sqring(ctx, &s))
2669 break;
2670
Jens Axboe9e645e112019-05-10 16:07:28 -06002671 /*
2672 * If previous wasn't linked and we have a linked command,
2673 * that's the end of the chain. Submit the previous link.
2674 */
2675 if (!prev_was_link && link) {
Jens Axboec57666682019-09-09 16:19:45 -06002676 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2677 force_nonblock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002678 link = NULL;
Jackie Liu5f5ad9c2019-09-18 10:37:53 +08002679 shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002680 }
2681 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2682
Jackie Liu4fe2c962019-09-09 20:50:40 +08002683 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2684 if (!shadow_req) {
2685 shadow_req = io_get_req(ctx, NULL);
Jackie Liua1041c22019-09-18 17:25:52 +08002686 if (unlikely(!shadow_req))
2687 goto out;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002688 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2689 refcount_dec(&shadow_req->refs);
2690 }
2691 shadow_req->sequence = s.sequence;
2692 }
2693
Jackie Liua1041c22019-09-18 17:25:52 +08002694out:
Jens Axboe2b188cc2019-01-07 10:46:33 -07002695 s.has_user = true;
Jens Axboedef596e2019-01-09 08:59:42 -07002696 s.needs_lock = false;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002697 s.needs_fixed_file = false;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002698 submit++;
Jens Axboec57666682019-09-09 16:19:45 -06002699
2700 /*
2701 * The caller will block for events after submit, submit the
2702 * last IO non-blocking. This is either the only IO it's
2703 * submitting, or it already submitted the previous ones. This
2704 * improves performance by avoiding an async punt that we don't
2705 * need to do.
2706 */
2707 if (block_for_last && submit == to_submit)
2708 force_nonblock = false;
2709
2710 io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002711 }
2712 io_commit_sqring(ctx);
2713
Jens Axboe9e645e112019-05-10 16:07:28 -06002714 if (link)
Jens Axboec57666682019-09-09 16:19:45 -06002715 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2716 block_for_last);
Jens Axboe9a56a232019-01-09 09:06:50 -07002717 if (statep)
2718 io_submit_state_end(statep);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002719
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002720 return submit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002721}
2722
Jens Axboe2b188cc2019-01-07 10:46:33 -07002723/*
2724 * Wait until events become available, if we don't already have some. The
2725 * application must reap them itself, as they reside on the shared cq ring.
2726 */
2727static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2728 const sigset_t __user *sig, size_t sigsz)
2729{
Hristo Venev75b28af2019-08-26 17:23:46 +00002730 struct io_rings *rings = ctx->rings;
Jens Axboe5262f562019-09-17 12:26:57 -06002731 unsigned nr_timeouts;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002732 int ret;
2733
Hristo Venev75b28af2019-08-26 17:23:46 +00002734 if (io_cqring_events(rings) >= min_events)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002735 return 0;
2736
2737 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01002738#ifdef CONFIG_COMPAT
2739 if (in_compat_syscall())
2740 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07002741 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01002742 else
2743#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07002744 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01002745
Jens Axboe2b188cc2019-01-07 10:46:33 -07002746 if (ret)
2747 return ret;
2748 }
2749
Jens Axboe5262f562019-09-17 12:26:57 -06002750 nr_timeouts = atomic_read(&ctx->cq_timeouts);
2751 /*
2752 * Return if we have enough events, or if a timeout occured since
2753 * we started waiting. For timeouts, we always want to return to
2754 * userspace.
2755 */
2756 ret = wait_event_interruptible(ctx->wait,
2757 io_cqring_events(rings) >= min_events ||
2758 atomic_read(&ctx->cq_timeouts) != nr_timeouts);
Oleg Nesterovb7724342019-07-16 16:29:53 -07002759 restore_saved_sigmask_unless(ret == -ERESTARTSYS);
Oleg Nesterov97abc882019-06-28 12:06:50 -07002760 if (ret == -ERESTARTSYS)
2761 ret = -EINTR;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002762
Hristo Venev75b28af2019-08-26 17:23:46 +00002763 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002764}
2765
Jens Axboe6b063142019-01-10 22:13:58 -07002766static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
2767{
2768#if defined(CONFIG_UNIX)
2769 if (ctx->ring_sock) {
2770 struct sock *sock = ctx->ring_sock->sk;
2771 struct sk_buff *skb;
2772
2773 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
2774 kfree_skb(skb);
2775 }
2776#else
2777 int i;
2778
2779 for (i = 0; i < ctx->nr_user_files; i++)
2780 fput(ctx->user_files[i]);
2781#endif
2782}
2783
2784static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2785{
2786 if (!ctx->user_files)
2787 return -ENXIO;
2788
2789 __io_sqe_files_unregister(ctx);
2790 kfree(ctx->user_files);
2791 ctx->user_files = NULL;
2792 ctx->nr_user_files = 0;
2793 return 0;
2794}
2795
Jens Axboe6c271ce2019-01-10 11:22:30 -07002796static void io_sq_thread_stop(struct io_ring_ctx *ctx)
2797{
2798 if (ctx->sqo_thread) {
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002799 wait_for_completion(&ctx->sqo_thread_started);
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002800 /*
2801 * The park is a bit of a work-around, without it we get
2802 * warning spews on shutdown with SQPOLL set and affinity
2803 * set to a single CPU.
2804 */
Jens Axboe06058632019-04-13 09:26:03 -06002805 kthread_park(ctx->sqo_thread);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002806 kthread_stop(ctx->sqo_thread);
2807 ctx->sqo_thread = NULL;
2808 }
2809}
2810
Jens Axboe6b063142019-01-10 22:13:58 -07002811static void io_finish_async(struct io_ring_ctx *ctx)
2812{
Jens Axboe54a91f32019-09-10 09:15:04 -06002813 int i;
2814
Jens Axboe6c271ce2019-01-10 11:22:30 -07002815 io_sq_thread_stop(ctx);
2816
Jens Axboe54a91f32019-09-10 09:15:04 -06002817 for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
2818 if (ctx->sqo_wq[i]) {
2819 destroy_workqueue(ctx->sqo_wq[i]);
2820 ctx->sqo_wq[i] = NULL;
2821 }
Jens Axboe6b063142019-01-10 22:13:58 -07002822 }
2823}
2824
2825#if defined(CONFIG_UNIX)
2826static void io_destruct_skb(struct sk_buff *skb)
2827{
2828 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
2829
2830 io_finish_async(ctx);
2831 unix_destruct_scm(skb);
2832}
2833
2834/*
2835 * Ensure the UNIX gc is aware of our file set, so we are certain that
2836 * the io_uring can be safely unregistered on process exit, even if we have
2837 * loops in the file referencing.
2838 */
2839static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
2840{
2841 struct sock *sk = ctx->ring_sock->sk;
2842 struct scm_fp_list *fpl;
2843 struct sk_buff *skb;
2844 int i;
2845
2846 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
2847 unsigned long inflight = ctx->user->unix_inflight + nr;
2848
2849 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
2850 return -EMFILE;
2851 }
2852
2853 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
2854 if (!fpl)
2855 return -ENOMEM;
2856
2857 skb = alloc_skb(0, GFP_KERNEL);
2858 if (!skb) {
2859 kfree(fpl);
2860 return -ENOMEM;
2861 }
2862
2863 skb->sk = sk;
2864 skb->destructor = io_destruct_skb;
2865
2866 fpl->user = get_uid(ctx->user);
2867 for (i = 0; i < nr; i++) {
2868 fpl->fp[i] = get_file(ctx->user_files[i + offset]);
2869 unix_inflight(fpl->user, fpl->fp[i]);
2870 }
2871
2872 fpl->max = fpl->count = nr;
2873 UNIXCB(skb).fp = fpl;
2874 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2875 skb_queue_head(&sk->sk_receive_queue, skb);
2876
2877 for (i = 0; i < nr; i++)
2878 fput(fpl->fp[i]);
2879
2880 return 0;
2881}
2882
2883/*
2884 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
2885 * causes regular reference counting to break down. We rely on the UNIX
2886 * garbage collection to take care of this problem for us.
2887 */
2888static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2889{
2890 unsigned left, total;
2891 int ret = 0;
2892
2893 total = 0;
2894 left = ctx->nr_user_files;
2895 while (left) {
2896 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07002897
2898 ret = __io_sqe_files_scm(ctx, this_files, total);
2899 if (ret)
2900 break;
2901 left -= this_files;
2902 total += this_files;
2903 }
2904
2905 if (!ret)
2906 return 0;
2907
2908 while (total < ctx->nr_user_files) {
2909 fput(ctx->user_files[total]);
2910 total++;
2911 }
2912
2913 return ret;
2914}
2915#else
2916static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2917{
2918 return 0;
2919}
2920#endif
2921
2922static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
2923 unsigned nr_args)
2924{
2925 __s32 __user *fds = (__s32 __user *) arg;
2926 int fd, ret = 0;
2927 unsigned i;
2928
2929 if (ctx->user_files)
2930 return -EBUSY;
2931 if (!nr_args)
2932 return -EINVAL;
2933 if (nr_args > IORING_MAX_FIXED_FILES)
2934 return -EMFILE;
2935
2936 ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
2937 if (!ctx->user_files)
2938 return -ENOMEM;
2939
2940 for (i = 0; i < nr_args; i++) {
2941 ret = -EFAULT;
2942 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
2943 break;
2944
2945 ctx->user_files[i] = fget(fd);
2946
2947 ret = -EBADF;
2948 if (!ctx->user_files[i])
2949 break;
2950 /*
2951 * Don't allow io_uring instances to be registered. If UNIX
2952 * isn't enabled, then this causes a reference cycle and this
2953 * instance can never get freed. If UNIX is enabled we'll
2954 * handle it just fine, but there's still no point in allowing
2955 * a ring fd as it doesn't support regular read/write anyway.
2956 */
2957 if (ctx->user_files[i]->f_op == &io_uring_fops) {
2958 fput(ctx->user_files[i]);
2959 break;
2960 }
2961 ctx->nr_user_files++;
2962 ret = 0;
2963 }
2964
2965 if (ret) {
2966 for (i = 0; i < ctx->nr_user_files; i++)
2967 fput(ctx->user_files[i]);
2968
2969 kfree(ctx->user_files);
Jens Axboe25adf502019-04-03 09:52:40 -06002970 ctx->user_files = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07002971 ctx->nr_user_files = 0;
2972 return ret;
2973 }
2974
2975 ret = io_sqe_files_scm(ctx);
2976 if (ret)
2977 io_sqe_files_unregister(ctx);
2978
2979 return ret;
2980}
2981
Jens Axboe6c271ce2019-01-10 11:22:30 -07002982static int io_sq_offload_start(struct io_ring_ctx *ctx,
2983 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002984{
2985 int ret;
2986
Jens Axboe6c271ce2019-01-10 11:22:30 -07002987 init_waitqueue_head(&ctx->sqo_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002988 mmgrab(current->mm);
2989 ctx->sqo_mm = current->mm;
2990
Jens Axboe6c271ce2019-01-10 11:22:30 -07002991 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe3ec482d2019-04-08 10:51:01 -06002992 ret = -EPERM;
2993 if (!capable(CAP_SYS_ADMIN))
2994 goto err;
2995
Jens Axboe917257d2019-04-13 09:28:55 -06002996 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
2997 if (!ctx->sq_thread_idle)
2998 ctx->sq_thread_idle = HZ;
2999
Jens Axboe6c271ce2019-01-10 11:22:30 -07003000 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06003001 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003002
Jens Axboe917257d2019-04-13 09:28:55 -06003003 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06003004 if (cpu >= nr_cpu_ids)
3005 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08003006 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06003007 goto err;
3008
Jens Axboe6c271ce2019-01-10 11:22:30 -07003009 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3010 ctx, cpu,
3011 "io_uring-sq");
3012 } else {
3013 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3014 "io_uring-sq");
3015 }
3016 if (IS_ERR(ctx->sqo_thread)) {
3017 ret = PTR_ERR(ctx->sqo_thread);
3018 ctx->sqo_thread = NULL;
3019 goto err;
3020 }
3021 wake_up_process(ctx->sqo_thread);
3022 } else if (p->flags & IORING_SETUP_SQ_AFF) {
3023 /* Can't have SQ_AFF without SQPOLL */
3024 ret = -EINVAL;
3025 goto err;
3026 }
3027
Jens Axboe2b188cc2019-01-07 10:46:33 -07003028 /* Do QD, or 2 * CPUS, whatever is smallest */
Jens Axboe54a91f32019-09-10 09:15:04 -06003029 ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
3030 WQ_UNBOUND | WQ_FREEZABLE,
Jens Axboe2b188cc2019-01-07 10:46:33 -07003031 min(ctx->sq_entries - 1, 2 * num_online_cpus()));
Jens Axboe54a91f32019-09-10 09:15:04 -06003032 if (!ctx->sqo_wq[0]) {
3033 ret = -ENOMEM;
3034 goto err;
3035 }
3036
3037 /*
3038 * This is for buffered writes, where we want to limit the parallelism
3039 * due to file locking in file systems. As "normal" buffered writes
3040 * should parellelize on writeout quite nicely, limit us to having 2
3041 * pending. This avoids massive contention on the inode when doing
3042 * buffered async writes.
3043 */
3044 ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
3045 WQ_UNBOUND | WQ_FREEZABLE, 2);
3046 if (!ctx->sqo_wq[1]) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07003047 ret = -ENOMEM;
3048 goto err;
3049 }
3050
3051 return 0;
3052err:
Jens Axboe54a91f32019-09-10 09:15:04 -06003053 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003054 mmdrop(ctx->sqo_mm);
3055 ctx->sqo_mm = NULL;
3056 return ret;
3057}
3058
3059static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3060{
3061 atomic_long_sub(nr_pages, &user->locked_vm);
3062}
3063
3064static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3065{
3066 unsigned long page_limit, cur_pages, new_pages;
3067
3068 /* Don't allow more pages than we can safely lock */
3069 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3070
3071 do {
3072 cur_pages = atomic_long_read(&user->locked_vm);
3073 new_pages = cur_pages + nr_pages;
3074 if (new_pages > page_limit)
3075 return -ENOMEM;
3076 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3077 new_pages) != cur_pages);
3078
3079 return 0;
3080}
3081
3082static void io_mem_free(void *ptr)
3083{
Mark Rutland52e04ef2019-04-30 17:30:21 +01003084 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003085
Mark Rutland52e04ef2019-04-30 17:30:21 +01003086 if (!ptr)
3087 return;
3088
3089 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003090 if (put_page_testzero(page))
3091 free_compound_page(page);
3092}
3093
3094static void *io_mem_alloc(size_t size)
3095{
3096 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3097 __GFP_NORETRY;
3098
3099 return (void *) __get_free_pages(gfp_flags, get_order(size));
3100}
3101
Hristo Venev75b28af2019-08-26 17:23:46 +00003102static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3103 size_t *sq_offset)
3104{
3105 struct io_rings *rings;
3106 size_t off, sq_array_size;
3107
3108 off = struct_size(rings, cqes, cq_entries);
3109 if (off == SIZE_MAX)
3110 return SIZE_MAX;
3111
3112#ifdef CONFIG_SMP
3113 off = ALIGN(off, SMP_CACHE_BYTES);
3114 if (off == 0)
3115 return SIZE_MAX;
3116#endif
3117
3118 sq_array_size = array_size(sizeof(u32), sq_entries);
3119 if (sq_array_size == SIZE_MAX)
3120 return SIZE_MAX;
3121
3122 if (check_add_overflow(off, sq_array_size, &off))
3123 return SIZE_MAX;
3124
3125 if (sq_offset)
3126 *sq_offset = off;
3127
3128 return off;
3129}
3130
Jens Axboe2b188cc2019-01-07 10:46:33 -07003131static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3132{
Hristo Venev75b28af2019-08-26 17:23:46 +00003133 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003134
Hristo Venev75b28af2019-08-26 17:23:46 +00003135 pages = (size_t)1 << get_order(
3136 rings_size(sq_entries, cq_entries, NULL));
3137 pages += (size_t)1 << get_order(
3138 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07003139
Hristo Venev75b28af2019-08-26 17:23:46 +00003140 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003141}
3142
Jens Axboeedafcce2019-01-09 09:16:05 -07003143static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3144{
3145 int i, j;
3146
3147 if (!ctx->user_bufs)
3148 return -ENXIO;
3149
3150 for (i = 0; i < ctx->nr_user_bufs; i++) {
3151 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3152
3153 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbard27c4d3a2019-08-04 19:32:06 -07003154 put_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07003155
3156 if (ctx->account_mem)
3157 io_unaccount_mem(ctx->user, imu->nr_bvecs);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003158 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003159 imu->nr_bvecs = 0;
3160 }
3161
3162 kfree(ctx->user_bufs);
3163 ctx->user_bufs = NULL;
3164 ctx->nr_user_bufs = 0;
3165 return 0;
3166}
3167
3168static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3169 void __user *arg, unsigned index)
3170{
3171 struct iovec __user *src;
3172
3173#ifdef CONFIG_COMPAT
3174 if (ctx->compat) {
3175 struct compat_iovec __user *ciovs;
3176 struct compat_iovec ciov;
3177
3178 ciovs = (struct compat_iovec __user *) arg;
3179 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3180 return -EFAULT;
3181
3182 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3183 dst->iov_len = ciov.iov_len;
3184 return 0;
3185 }
3186#endif
3187 src = (struct iovec __user *) arg;
3188 if (copy_from_user(dst, &src[index], sizeof(*dst)))
3189 return -EFAULT;
3190 return 0;
3191}
3192
3193static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3194 unsigned nr_args)
3195{
3196 struct vm_area_struct **vmas = NULL;
3197 struct page **pages = NULL;
3198 int i, j, got_pages = 0;
3199 int ret = -EINVAL;
3200
3201 if (ctx->user_bufs)
3202 return -EBUSY;
3203 if (!nr_args || nr_args > UIO_MAXIOV)
3204 return -EINVAL;
3205
3206 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3207 GFP_KERNEL);
3208 if (!ctx->user_bufs)
3209 return -ENOMEM;
3210
3211 for (i = 0; i < nr_args; i++) {
3212 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3213 unsigned long off, start, end, ubuf;
3214 int pret, nr_pages;
3215 struct iovec iov;
3216 size_t size;
3217
3218 ret = io_copy_iov(ctx, &iov, arg, i);
3219 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03003220 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07003221
3222 /*
3223 * Don't impose further limits on the size and buffer
3224 * constraints here, we'll -EINVAL later when IO is
3225 * submitted if they are wrong.
3226 */
3227 ret = -EFAULT;
3228 if (!iov.iov_base || !iov.iov_len)
3229 goto err;
3230
3231 /* arbitrary limit, but we need something */
3232 if (iov.iov_len > SZ_1G)
3233 goto err;
3234
3235 ubuf = (unsigned long) iov.iov_base;
3236 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3237 start = ubuf >> PAGE_SHIFT;
3238 nr_pages = end - start;
3239
3240 if (ctx->account_mem) {
3241 ret = io_account_mem(ctx->user, nr_pages);
3242 if (ret)
3243 goto err;
3244 }
3245
3246 ret = 0;
3247 if (!pages || nr_pages > got_pages) {
3248 kfree(vmas);
3249 kfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003250 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07003251 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003252 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07003253 sizeof(struct vm_area_struct *),
3254 GFP_KERNEL);
3255 if (!pages || !vmas) {
3256 ret = -ENOMEM;
3257 if (ctx->account_mem)
3258 io_unaccount_mem(ctx->user, nr_pages);
3259 goto err;
3260 }
3261 got_pages = nr_pages;
3262 }
3263
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003264 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07003265 GFP_KERNEL);
3266 ret = -ENOMEM;
3267 if (!imu->bvec) {
3268 if (ctx->account_mem)
3269 io_unaccount_mem(ctx->user, nr_pages);
3270 goto err;
3271 }
3272
3273 ret = 0;
3274 down_read(&current->mm->mmap_sem);
Ira Weiny932f4a62019-05-13 17:17:03 -07003275 pret = get_user_pages(ubuf, nr_pages,
3276 FOLL_WRITE | FOLL_LONGTERM,
3277 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003278 if (pret == nr_pages) {
3279 /* don't support file backed memory */
3280 for (j = 0; j < nr_pages; j++) {
3281 struct vm_area_struct *vma = vmas[j];
3282
3283 if (vma->vm_file &&
3284 !is_file_hugepages(vma->vm_file)) {
3285 ret = -EOPNOTSUPP;
3286 break;
3287 }
3288 }
3289 } else {
3290 ret = pret < 0 ? pret : -EFAULT;
3291 }
3292 up_read(&current->mm->mmap_sem);
3293 if (ret) {
3294 /*
3295 * if we did partial map, or found file backed vmas,
3296 * release any pages we did get
3297 */
John Hubbard27c4d3a2019-08-04 19:32:06 -07003298 if (pret > 0)
3299 put_user_pages(pages, pret);
Jens Axboeedafcce2019-01-09 09:16:05 -07003300 if (ctx->account_mem)
3301 io_unaccount_mem(ctx->user, nr_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003302 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003303 goto err;
3304 }
3305
3306 off = ubuf & ~PAGE_MASK;
3307 size = iov.iov_len;
3308 for (j = 0; j < nr_pages; j++) {
3309 size_t vec_len;
3310
3311 vec_len = min_t(size_t, size, PAGE_SIZE - off);
3312 imu->bvec[j].bv_page = pages[j];
3313 imu->bvec[j].bv_len = vec_len;
3314 imu->bvec[j].bv_offset = off;
3315 off = 0;
3316 size -= vec_len;
3317 }
3318 /* store original address for later verification */
3319 imu->ubuf = ubuf;
3320 imu->len = iov.iov_len;
3321 imu->nr_bvecs = nr_pages;
3322
3323 ctx->nr_user_bufs++;
3324 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003325 kvfree(pages);
3326 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003327 return 0;
3328err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003329 kvfree(pages);
3330 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003331 io_sqe_buffer_unregister(ctx);
3332 return ret;
3333}
3334
Jens Axboe9b402842019-04-11 11:45:41 -06003335static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3336{
3337 __s32 __user *fds = arg;
3338 int fd;
3339
3340 if (ctx->cq_ev_fd)
3341 return -EBUSY;
3342
3343 if (copy_from_user(&fd, fds, sizeof(*fds)))
3344 return -EFAULT;
3345
3346 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3347 if (IS_ERR(ctx->cq_ev_fd)) {
3348 int ret = PTR_ERR(ctx->cq_ev_fd);
3349 ctx->cq_ev_fd = NULL;
3350 return ret;
3351 }
3352
3353 return 0;
3354}
3355
3356static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3357{
3358 if (ctx->cq_ev_fd) {
3359 eventfd_ctx_put(ctx->cq_ev_fd);
3360 ctx->cq_ev_fd = NULL;
3361 return 0;
3362 }
3363
3364 return -ENXIO;
3365}
3366
Jens Axboe2b188cc2019-01-07 10:46:33 -07003367static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3368{
Jens Axboe6b063142019-01-10 22:13:58 -07003369 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003370 if (ctx->sqo_mm)
3371 mmdrop(ctx->sqo_mm);
Jens Axboedef596e2019-01-09 08:59:42 -07003372
3373 io_iopoll_reap_events(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07003374 io_sqe_buffer_unregister(ctx);
Jens Axboe6b063142019-01-10 22:13:58 -07003375 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06003376 io_eventfd_unregister(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07003377
Jens Axboe2b188cc2019-01-07 10:46:33 -07003378#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07003379 if (ctx->ring_sock) {
3380 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003381 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07003382 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003383#endif
3384
Hristo Venev75b28af2019-08-26 17:23:46 +00003385 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003386 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003387
3388 percpu_ref_exit(&ctx->refs);
3389 if (ctx->account_mem)
3390 io_unaccount_mem(ctx->user,
3391 ring_pages(ctx->sq_entries, ctx->cq_entries));
3392 free_uid(ctx->user);
3393 kfree(ctx);
3394}
3395
3396static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3397{
3398 struct io_ring_ctx *ctx = file->private_data;
3399 __poll_t mask = 0;
3400
3401 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02003402 /*
3403 * synchronizes with barrier from wq_has_sleeper call in
3404 * io_commit_cqring
3405 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003406 smp_rmb();
Hristo Venev75b28af2019-08-26 17:23:46 +00003407 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
3408 ctx->rings->sq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003409 mask |= EPOLLOUT | EPOLLWRNORM;
Hristo Venev75b28af2019-08-26 17:23:46 +00003410 if (READ_ONCE(ctx->rings->sq.head) != ctx->cached_cq_tail)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003411 mask |= EPOLLIN | EPOLLRDNORM;
3412
3413 return mask;
3414}
3415
3416static int io_uring_fasync(int fd, struct file *file, int on)
3417{
3418 struct io_ring_ctx *ctx = file->private_data;
3419
3420 return fasync_helper(fd, file, on, &ctx->cq_fasync);
3421}
3422
3423static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3424{
3425 mutex_lock(&ctx->uring_lock);
3426 percpu_ref_kill(&ctx->refs);
3427 mutex_unlock(&ctx->uring_lock);
3428
Jens Axboe5262f562019-09-17 12:26:57 -06003429 io_kill_timeouts(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003430 io_poll_remove_all(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07003431 io_iopoll_reap_events(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003432 wait_for_completion(&ctx->ctx_done);
3433 io_ring_ctx_free(ctx);
3434}
3435
3436static int io_uring_release(struct inode *inode, struct file *file)
3437{
3438 struct io_ring_ctx *ctx = file->private_data;
3439
3440 file->private_data = NULL;
3441 io_ring_ctx_wait_and_kill(ctx);
3442 return 0;
3443}
3444
3445static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3446{
3447 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
3448 unsigned long sz = vma->vm_end - vma->vm_start;
3449 struct io_ring_ctx *ctx = file->private_data;
3450 unsigned long pfn;
3451 struct page *page;
3452 void *ptr;
3453
3454 switch (offset) {
3455 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00003456 case IORING_OFF_CQ_RING:
3457 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003458 break;
3459 case IORING_OFF_SQES:
3460 ptr = ctx->sq_sqes;
3461 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003462 default:
3463 return -EINVAL;
3464 }
3465
3466 page = virt_to_head_page(ptr);
3467 if (sz > (PAGE_SIZE << compound_order(page)))
3468 return -EINVAL;
3469
3470 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3471 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3472}
3473
3474SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3475 u32, min_complete, u32, flags, const sigset_t __user *, sig,
3476 size_t, sigsz)
3477{
3478 struct io_ring_ctx *ctx;
3479 long ret = -EBADF;
3480 int submitted = 0;
3481 struct fd f;
3482
Jens Axboe6c271ce2019-01-10 11:22:30 -07003483 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
Jens Axboe2b188cc2019-01-07 10:46:33 -07003484 return -EINVAL;
3485
3486 f = fdget(fd);
3487 if (!f.file)
3488 return -EBADF;
3489
3490 ret = -EOPNOTSUPP;
3491 if (f.file->f_op != &io_uring_fops)
3492 goto out_fput;
3493
3494 ret = -ENXIO;
3495 ctx = f.file->private_data;
3496 if (!percpu_ref_tryget(&ctx->refs))
3497 goto out_fput;
3498
Jens Axboe6c271ce2019-01-10 11:22:30 -07003499 /*
3500 * For SQ polling, the thread will do all submissions and completions.
3501 * Just return the requested submit count, and wake the thread if
3502 * we were asked to.
3503 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06003504 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003505 if (ctx->flags & IORING_SETUP_SQPOLL) {
3506 if (flags & IORING_ENTER_SQ_WAKEUP)
3507 wake_up(&ctx->sqo_wait);
3508 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06003509 } else if (to_submit) {
Jens Axboec57666682019-09-09 16:19:45 -06003510 bool block_for_last = false;
3511
Jens Axboe2b188cc2019-01-07 10:46:33 -07003512 to_submit = min(to_submit, ctx->sq_entries);
3513
Jens Axboec57666682019-09-09 16:19:45 -06003514 /*
3515 * Allow last submission to block in a series, IFF the caller
3516 * asked to wait for events and we don't currently have
3517 * enough. This potentially avoids an async punt.
3518 */
3519 if (to_submit == min_complete &&
3520 io_cqring_events(ctx->rings) < min_complete)
3521 block_for_last = true;
3522
Jens Axboe2b188cc2019-01-07 10:46:33 -07003523 mutex_lock(&ctx->uring_lock);
Jens Axboec57666682019-09-09 16:19:45 -06003524 submitted = io_ring_submit(ctx, to_submit, block_for_last);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003525 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003526 }
3527 if (flags & IORING_ENTER_GETEVENTS) {
Jens Axboedef596e2019-01-09 08:59:42 -07003528 unsigned nr_events = 0;
3529
Jens Axboe2b188cc2019-01-07 10:46:33 -07003530 min_complete = min(min_complete, ctx->cq_entries);
3531
Jens Axboedef596e2019-01-09 08:59:42 -07003532 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07003533 ret = io_iopoll_check(ctx, &nr_events, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07003534 } else {
3535 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3536 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003537 }
3538
Jens Axboe2b188cc2019-01-07 10:46:33 -07003539 io_ring_drop_ctx_refs(ctx, 1);
3540out_fput:
3541 fdput(f);
3542 return submitted ? submitted : ret;
3543}
3544
3545static const struct file_operations io_uring_fops = {
3546 .release = io_uring_release,
3547 .mmap = io_uring_mmap,
3548 .poll = io_uring_poll,
3549 .fasync = io_uring_fasync,
3550};
3551
3552static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3553 struct io_uring_params *p)
3554{
Hristo Venev75b28af2019-08-26 17:23:46 +00003555 struct io_rings *rings;
3556 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003557
Hristo Venev75b28af2019-08-26 17:23:46 +00003558 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
3559 if (size == SIZE_MAX)
3560 return -EOVERFLOW;
3561
3562 rings = io_mem_alloc(size);
3563 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003564 return -ENOMEM;
3565
Hristo Venev75b28af2019-08-26 17:23:46 +00003566 ctx->rings = rings;
3567 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3568 rings->sq_ring_mask = p->sq_entries - 1;
3569 rings->cq_ring_mask = p->cq_entries - 1;
3570 rings->sq_ring_entries = p->sq_entries;
3571 rings->cq_ring_entries = p->cq_entries;
3572 ctx->sq_mask = rings->sq_ring_mask;
3573 ctx->cq_mask = rings->cq_ring_mask;
3574 ctx->sq_entries = rings->sq_ring_entries;
3575 ctx->cq_entries = rings->cq_ring_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003576
3577 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3578 if (size == SIZE_MAX)
3579 return -EOVERFLOW;
3580
3581 ctx->sq_sqes = io_mem_alloc(size);
Mark Rutland52e04ef2019-04-30 17:30:21 +01003582 if (!ctx->sq_sqes)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003583 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003584
Jens Axboe2b188cc2019-01-07 10:46:33 -07003585 return 0;
3586}
3587
3588/*
3589 * Allocate an anonymous fd, this is what constitutes the application
3590 * visible backing of an io_uring instance. The application mmaps this
3591 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3592 * we have to tie this fd to a socket for file garbage collection purposes.
3593 */
3594static int io_uring_get_fd(struct io_ring_ctx *ctx)
3595{
3596 struct file *file;
3597 int ret;
3598
3599#if defined(CONFIG_UNIX)
3600 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3601 &ctx->ring_sock);
3602 if (ret)
3603 return ret;
3604#endif
3605
3606 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3607 if (ret < 0)
3608 goto err;
3609
3610 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
3611 O_RDWR | O_CLOEXEC);
3612 if (IS_ERR(file)) {
3613 put_unused_fd(ret);
3614 ret = PTR_ERR(file);
3615 goto err;
3616 }
3617
3618#if defined(CONFIG_UNIX)
3619 ctx->ring_sock->file = file;
Jens Axboe6b063142019-01-10 22:13:58 -07003620 ctx->ring_sock->sk->sk_user_data = ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003621#endif
3622 fd_install(ret, file);
3623 return ret;
3624err:
3625#if defined(CONFIG_UNIX)
3626 sock_release(ctx->ring_sock);
3627 ctx->ring_sock = NULL;
3628#endif
3629 return ret;
3630}
3631
3632static int io_uring_create(unsigned entries, struct io_uring_params *p)
3633{
3634 struct user_struct *user = NULL;
3635 struct io_ring_ctx *ctx;
3636 bool account_mem;
3637 int ret;
3638
3639 if (!entries || entries > IORING_MAX_ENTRIES)
3640 return -EINVAL;
3641
3642 /*
3643 * Use twice as many entries for the CQ ring. It's possible for the
3644 * application to drive a higher depth than the size of the SQ ring,
3645 * since the sqes are only used at submission time. This allows for
3646 * some flexibility in overcommitting a bit.
3647 */
3648 p->sq_entries = roundup_pow_of_two(entries);
3649 p->cq_entries = 2 * p->sq_entries;
3650
3651 user = get_uid(current_user());
3652 account_mem = !capable(CAP_IPC_LOCK);
3653
3654 if (account_mem) {
3655 ret = io_account_mem(user,
3656 ring_pages(p->sq_entries, p->cq_entries));
3657 if (ret) {
3658 free_uid(user);
3659 return ret;
3660 }
3661 }
3662
3663 ctx = io_ring_ctx_alloc(p);
3664 if (!ctx) {
3665 if (account_mem)
3666 io_unaccount_mem(user, ring_pages(p->sq_entries,
3667 p->cq_entries));
3668 free_uid(user);
3669 return -ENOMEM;
3670 }
3671 ctx->compat = in_compat_syscall();
3672 ctx->account_mem = account_mem;
3673 ctx->user = user;
3674
3675 ret = io_allocate_scq_urings(ctx, p);
3676 if (ret)
3677 goto err;
3678
Jens Axboe6c271ce2019-01-10 11:22:30 -07003679 ret = io_sq_offload_start(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003680 if (ret)
3681 goto err;
3682
3683 ret = io_uring_get_fd(ctx);
3684 if (ret < 0)
3685 goto err;
3686
3687 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00003688 p->sq_off.head = offsetof(struct io_rings, sq.head);
3689 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3690 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3691 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3692 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3693 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3694 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003695
3696 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00003697 p->cq_off.head = offsetof(struct io_rings, cq.head);
3698 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3699 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3700 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3701 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3702 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Jens Axboeac90f242019-09-06 10:26:21 -06003703
3704 p->features = IORING_FEAT_SINGLE_MMAP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003705 return ret;
3706err:
3707 io_ring_ctx_wait_and_kill(ctx);
3708 return ret;
3709}
3710
3711/*
3712 * Sets up an aio uring context, and returns the fd. Applications asks for a
3713 * ring size, we return the actual sq/cq ring sizes (among other things) in the
3714 * params structure passed in.
3715 */
3716static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3717{
3718 struct io_uring_params p;
3719 long ret;
3720 int i;
3721
3722 if (copy_from_user(&p, params, sizeof(p)))
3723 return -EFAULT;
3724 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3725 if (p.resv[i])
3726 return -EINVAL;
3727 }
3728
Jens Axboe6c271ce2019-01-10 11:22:30 -07003729 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3730 IORING_SETUP_SQ_AFF))
Jens Axboe2b188cc2019-01-07 10:46:33 -07003731 return -EINVAL;
3732
3733 ret = io_uring_create(entries, &p);
3734 if (ret < 0)
3735 return ret;
3736
3737 if (copy_to_user(params, &p, sizeof(p)))
3738 return -EFAULT;
3739
3740 return ret;
3741}
3742
3743SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3744 struct io_uring_params __user *, params)
3745{
3746 return io_uring_setup(entries, params);
3747}
3748
Jens Axboeedafcce2019-01-09 09:16:05 -07003749static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
3750 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06003751 __releases(ctx->uring_lock)
3752 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07003753{
3754 int ret;
3755
Jens Axboe35fa71a2019-04-22 10:23:23 -06003756 /*
3757 * We're inside the ring mutex, if the ref is already dying, then
3758 * someone else killed the ctx or is already going through
3759 * io_uring_register().
3760 */
3761 if (percpu_ref_is_dying(&ctx->refs))
3762 return -ENXIO;
3763
Jens Axboeedafcce2019-01-09 09:16:05 -07003764 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06003765
3766 /*
3767 * Drop uring mutex before waiting for references to exit. If another
3768 * thread is currently inside io_uring_enter() it might need to grab
3769 * the uring_lock to make progress. If we hold it here across the drain
3770 * wait, then we can deadlock. It's safe to drop the mutex here, since
3771 * no new references will come in after we've killed the percpu ref.
3772 */
3773 mutex_unlock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07003774 wait_for_completion(&ctx->ctx_done);
Jens Axboeb19062a2019-04-15 10:49:38 -06003775 mutex_lock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07003776
3777 switch (opcode) {
3778 case IORING_REGISTER_BUFFERS:
3779 ret = io_sqe_buffer_register(ctx, arg, nr_args);
3780 break;
3781 case IORING_UNREGISTER_BUFFERS:
3782 ret = -EINVAL;
3783 if (arg || nr_args)
3784 break;
3785 ret = io_sqe_buffer_unregister(ctx);
3786 break;
Jens Axboe6b063142019-01-10 22:13:58 -07003787 case IORING_REGISTER_FILES:
3788 ret = io_sqe_files_register(ctx, arg, nr_args);
3789 break;
3790 case IORING_UNREGISTER_FILES:
3791 ret = -EINVAL;
3792 if (arg || nr_args)
3793 break;
3794 ret = io_sqe_files_unregister(ctx);
3795 break;
Jens Axboe9b402842019-04-11 11:45:41 -06003796 case IORING_REGISTER_EVENTFD:
3797 ret = -EINVAL;
3798 if (nr_args != 1)
3799 break;
3800 ret = io_eventfd_register(ctx, arg);
3801 break;
3802 case IORING_UNREGISTER_EVENTFD:
3803 ret = -EINVAL;
3804 if (arg || nr_args)
3805 break;
3806 ret = io_eventfd_unregister(ctx);
3807 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07003808 default:
3809 ret = -EINVAL;
3810 break;
3811 }
3812
3813 /* bring the ctx back to life */
3814 reinit_completion(&ctx->ctx_done);
3815 percpu_ref_reinit(&ctx->refs);
3816 return ret;
3817}
3818
3819SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
3820 void __user *, arg, unsigned int, nr_args)
3821{
3822 struct io_ring_ctx *ctx;
3823 long ret = -EBADF;
3824 struct fd f;
3825
3826 f = fdget(fd);
3827 if (!f.file)
3828 return -EBADF;
3829
3830 ret = -EOPNOTSUPP;
3831 if (f.file->f_op != &io_uring_fops)
3832 goto out_fput;
3833
3834 ctx = f.file->private_data;
3835
3836 mutex_lock(&ctx->uring_lock);
3837 ret = __io_uring_register(ctx, opcode, arg, nr_args);
3838 mutex_unlock(&ctx->uring_lock);
3839out_fput:
3840 fdput(f);
3841 return ret;
3842}
3843
Jens Axboe2b188cc2019-01-07 10:46:33 -07003844static int __init io_uring_init(void)
3845{
3846 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3847 return 0;
3848};
3849__initcall(io_uring_init);