// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs an smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier (smp_mb()) is needed
 * between the two.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
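
/*
 * Illustrative sketch, not part of this file: the userspace half of the
 * barrier pairing described above, roughly as the liburing examples do it.
 * khead/ktail point into the shared rings; atomic_load_acquire() and
 * atomic_store_release() are hypothetical stand-ins for whatever
 * acquire/release primitives the application uses.
 *
 *	// Reap one completion: acquire-load the CQ tail the kernel wrote,
 *	// consume the entry, then release-store the new CQ head.
 *	unsigned head = *cq->khead;
 *	if (head != atomic_load_acquire(cq->ktail)) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *		handle_cqe(cqe);
 *		atomic_store_release(cq->khead, head + 1);
 *	}
 *
 *	// Submit one request: fill the SQE first, then release-store the
 *	// new SQ tail so the kernel observes a fully written entry.
 *	unsigned tail = *sq->ktail;
 *	fill_sqe(&sq->sqes[sqe_index]);
 *	sq->array[tail & *sq->kring_mask] = sqe_index;
 *	atomic_store_release(sq->ktail, tail + 1);
 */
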
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
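
/*
 * Illustrative sketch, not part of this file: how the shift/mask above
 * split a fixed-file index into a two-level lookup. Each fixed_file_table
 * holds 512 file pointers (one page of pointers on 64-bit), and up to 64
 * tables cover IORING_MAX_FIXED_FILES:
 *
 *	struct fixed_file_table *table;
 *
 *	table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
 *	file = table->files[index & IORING_FILE_TABLE_MASK];
 */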

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring, and the application controls the tail of the sq ring
	 * and the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing to
	 * a cached value).
	 *
	 * After a new SQ head value was read by the application, this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are no more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing to
	 * a cached value).
	 *
	 * As completion events come in out of order, this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
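
/*
 * Illustrative sketch, not part of this file: head and tail are free-running
 * u32 counters and ring_entries is a power of 2, so unsigned wraparound keeps
 * the usual ring arithmetic valid without extra bookkeeping:
 *
 *	unsigned used = tail - head;			// entries in flight
 *	bool full = (tail - head) == ring_entries;	// no room for more
 *	unsigned idx = tail & ring_mask;		// slot to fill next
 */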

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct fixed_file_table {
	struct file	**files;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		bool			compat;
		bool			account_mem;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the
		 * entries array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		struct io_uring_sqe	*sq_sqes;

		struct list_head	defer_list;
		struct list_head	timeout_list;

		wait_queue_head_t	inflight_wait;
	} ____cacheline_aligned_in_smp;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;
	struct completion	sqo_thread_started;

	struct {
		unsigned		cached_cq_tail;
		atomic_t		cached_cq_overflow;
		unsigned		cq_entries;
		unsigned		cq_mask;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
		atomic_t		cq_timeouts;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_table	*file_table;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	struct completion	ctx_done;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;
		bool			poll_multi_file;
		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct list_head	cancel_list;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif
};
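
/*
 * Illustrative sketch, not part of this file: one way an application could
 * use the sq_array indirection documented above. A prepared SQE can live
 * permanently in one slot and only have its index published when it should
 * actually be submitted (fill_sqe() is a hypothetical helper):
 *
 *	fill_sqe(&sqes[3]);			// one-time setup of slot 3
 *
 *	sq_array[tail & sq_ring_mask] = 3;	// per submission: publish it
 *	atomic_store_release(&sq->tail, tail + 1);
 */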

struct sqe_submit {
	const struct io_uring_sqe	*sqe;
	struct file			*ring_file;
	int				ring_fd;
	u32				sequence;
	bool				has_user;
	bool				in_async;
	bool				needs_fixed_file;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_timeout {
	struct file			*file;
	struct hrtimer			timer;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct kiocb		rw;
		struct io_poll_iocb	poll;
		struct io_timeout	timeout;
	};

	struct sqe_submit	submit;

	struct io_ring_ctx	*ctx;
	struct list_head	list;
	struct list_head	link_list;
	unsigned int		flags;
	refcount_t		refs;
#define REQ_F_NOWAIT		1	/* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
#define REQ_F_FIXED_FILE	4	/* ctx owns file */
#define REQ_F_SEQ_PREV		8	/* sequential with previous */
#define REQ_F_IO_DRAIN		16	/* drain existing IO first */
#define REQ_F_IO_DRAINED	32	/* drain done */
#define REQ_F_LINK		64	/* linked sqes */
#define REQ_F_LINK_DONE		128	/* linked sqes done */
#define REQ_F_FAIL_LINK		256	/* fail rest of links */
#define REQ_F_SHADOW_DRAIN	512	/* link-drain shadow req */
#define REQ_F_TIMEOUT		1024	/* timeout request */
#define REQ_F_ISREG		2048	/* regular file */
#define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
#define REQ_F_INFLIGHT		8192	/* on inflight list */
	u64			user_data;
	u32			result;
	u32			sequence;

	struct list_head	inflight_entry;

	struct io_wq_work	work;
};
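
/*
 * Illustrative note, not part of this file: why the "file pointer first"
 * rule above matters. Every union member starts with a struct file *, so
 * the same storage can be read through any of them; these all name the
 * same pointer:
 *
 *	req->file
 *	req->rw.ki_filp
 *	req->poll.file
 *	req->timeout.file
 */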

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;
	unsigned int		cur_req;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		used_refs;
	unsigned int		ios_left;
};

static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
				 long res);
static void __io_free_req(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ctx_done);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(ctx);
		return NULL;
	}

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->cq_wait);
	init_completion(&ctx->ctx_done);
	init_completion(&ctx->sqo_thread_started);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->cancel_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	return ctx;
}

static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
				       struct io_kiocb *req)
{
	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
			+ atomic_read(&ctx->cached_cq_overflow);
}
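
/*
 * Illustrative worked example, not part of this file: a drained request
 * stamped with sequence 10 stays deferred while
 *
 *	cached_cq_tail + cached_sq_dropped + cached_cq_overflow != 10
 *
 * i.e. until every submission ahead of it has been accounted for, whether
 * it completed normally (cq_tail), was dropped as invalid (sq_dropped), or
 * overflowed the CQ ring (cq_overflow). With 8 completions, 1 drop and 1
 * overflow the sum reaches 10 and the request becomes runnable.
 */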

static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
		return false;

	return __io_sequence_defer(ctx, req);
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
	if (req && !io_sequence_defer(ctx, req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
	if (req && !__io_sequence_defer(ctx, req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
		/* order cqe stores with ring update */
		smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

		if (wq_has_sleeper(&ctx->cq_wait)) {
			wake_up_interruptible(&ctx->cq_wait);
			kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
		}
	}
}

static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
{
	u8 opcode = READ_ONCE(sqe->opcode);

	return !(opcode == IORING_OP_READ_FIXED ||
		 opcode == IORING_OP_WRITE_FIXED);
}

static inline bool io_prep_async_work(struct io_kiocb *req)
{
	bool do_hashed = false;

	if (req->submit.sqe) {
		switch (req->submit.sqe->opcode) {
		case IORING_OP_WRITEV:
		case IORING_OP_WRITE_FIXED:
			do_hashed = true;
			break;
		}
		if (io_sqe_needs_user(req->submit.sqe))
			req->work.flags |= IO_WQ_WORK_NEEDS_USER;
	}

	return do_hashed;
}

static inline void io_queue_async_work(struct io_ring_ctx *ctx,
				       struct io_kiocb *req)
{
	bool do_hashed = io_prep_async_work(req);

	trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
					req->flags);
	if (!do_hashed) {
		io_wq_enqueue(ctx->io_wq, &req->work);
	} else {
		io_wq_enqueue_hashed(ctx->io_wq, &req->work,
					file_inode(req->file));
	}
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		io_cqring_fill_event(req->ctx, req->user_data, 0);
		__io_free_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	while ((req = io_get_timeout_req(ctx)) != NULL)
		io_kill_timeout(req);

	__io_commit_cqring(ctx);

	while ((req = io_get_deferred_req(ctx)) != NULL) {
		if (req->flags & REQ_F_SHADOW_DRAIN) {
			/* Just for drain, free it. */
			__io_free_req(req);
			continue;
		}
		req->flags |= REQ_F_IO_DRAINED;
		io_queue_async_work(ctx, req);
	}
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}
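
/*
 * Illustrative sketch, not part of this file: the ordering contract the
 * comment in io_get_cqring() relies on, written out as the classic ring
 * producer pattern. The head read above, the WRITE_ONCE() stores that fill
 * the entry, and the later smp_store_release() of cq.tail in
 * __io_commit_cqring() line up as:
 *
 *	if (tail - READ_ONCE(head) == entries)	// full, slot not yet free
 *		return NULL;
 *	WRITE_ONCE(cqe->user_data, ...);	// ordered after the head
 *	WRITE_ONCE(cqe->res, ...);		// read by control dependency
 *	smp_store_release(&cq.tail, tail + 1);	// publish to the consumer
 */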

static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
				 long res)
{
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, ki_user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (cqe) {
		WRITE_ONCE(cqe->user_data, ki_user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, 0);
	} else {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	}
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (ctx->cq_ev_fd)
		eventfd_signal(ctx->cq_ev_fd, 1);
}

static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
				long res)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(ctx, user_data, res);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!percpu_ref_tryget(&ctx->refs))
		return NULL;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto out;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto out;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		state->cur_req = 1;
		req = state->reqs[0];
	} else {
		req = state->reqs[state->cur_req];
		state->free_reqs--;
		state->cur_req++;
	}

	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->result = 0;
	INIT_IO_WORK(&req->work, io_wq_submit_work);
	return req;
out:
	percpu_ref_put(&ctx->refs);
	return NULL;
}

static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
{
	if (*nr) {
		kmem_cache_free_bulk(req_cachep, *nr, reqs);
		percpu_ref_put_many(&ctx->refs, *nr);
		*nr = 0;
	}
}

static void __io_free_req(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		fput(req->file);
	if (req->flags & REQ_F_INFLIGHT) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}
	percpu_ref_put(&ctx->refs);
	kmem_cache_free(req_cachep, req);
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_kiocb *nxt;

	/*
	 * The list should never be empty when we are called here. But it
	 * could potentially happen if the chain is messed up, so check to
	 * be on the safe side.
	 */
	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
	if (nxt) {
		list_del(&nxt->list);
		if (!list_empty(&req->link_list)) {
			INIT_LIST_HEAD(&nxt->link_list);
			list_splice(&req->link_list, &nxt->link_list);
			nxt->flags |= REQ_F_LINK;
		}

		nxt->flags |= REQ_F_LINK_DONE;
		/*
		 * If we're in async work, we can continue processing the chain
		 * in this context instead of having to queue up new async work.
		 */
		if (nxtptr && current_work())
			*nxtptr = nxt;
		else
			io_queue_async_work(req->ctx, nxt);
	}
}

/*
 * Called if REQ_F_LINK is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_kiocb *link;

	while (!list_empty(&req->link_list)) {
		link = list_first_entry(&req->link_list, struct io_kiocb, list);
		list_del(&link->list);

		trace_io_uring_fail_link(req, link);
		io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
		__io_free_req(link);
	}
}

static void io_free_req(struct io_kiocb *req, struct io_kiocb **nxt)
{
	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_LINK) {
		if (req->flags & REQ_F_FAIL_LINK)
			io_fail_links(req);
		else
			io_req_link_next(req, nxt);
	}

	__io_free_req(req);
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	if (refcount_dec_and_test(&req->refs))
		io_free_req(req, &nxt);

	return nxt;
}

static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_kiocb *nxt;

	nxt = io_put_req_find_next(req);
	if (nxt) {
		if (nxtptr)
			*nxtptr = nxt;
		else
			io_queue_async_work(nxt->ctx, nxt);
	}
}

static unsigned io_cqring_events(struct io_rings *rings)
{
	/* See comment at the top of this file */
	smp_rmb();
	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	void *reqs[IO_IOPOLL_BATCH];
	struct io_kiocb *req;
	int to_free;

	to_free = 0;
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		io_cqring_fill_event(ctx, req->user_data, req->result);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs)) {
			/* If we're not using fixed files, we have to pair the
			 * completion part with the file put. Use regular
			 * completions for those, only batch free for fixed
			 * file and non-linked commands.
			 */
			if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
			    REQ_F_FIXED_FILE) {
				reqs[to_free++] = req;
				if (to_free == ARRAY_SIZE(reqs))
					io_free_req_many(ctx, reqs, &to_free);
			} else {
				io_free_req(req, NULL);
			}
		}
	}

	io_commit_cqring(ctx);
	io_free_req_many(ctx, reqs, &to_free);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
		struct kiocb *kiocb = &req->rw;

		/*
		 * Move completed entries to our local list. If we find a
		 * request that requires polling, break out and complete
		 * the done list first, if we have entries there.
		 */
		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
			list_move_tail(&req->list, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       long min)
{
	while (!list_empty(&ctx->poll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (!min || *nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->poll_list)) {
		unsigned int nr_events = 0;

		io_iopoll_getevents(ctx, &nr_events, 1);

		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 */
		cond_resched();
	}
	mutex_unlock(&ctx->uring_lock);
}

static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			     long min)
{
	int iters = 0, ret = 0;

	do {
		int tmin = 0;

		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (e.g. in error).
		 */
		if (io_cqring_events(ctx->rings))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&ctx->uring_lock);
		}

		if (*nr_events < min)
			tmin = min - *nr_events;

		ret = io_iopoll_getevents(ctx, nr_events, tmin);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	return ret;
}

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int ret;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	ret = __io_iopoll_check(ctx, nr_events, min);
	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, req->user_data, res);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	io_complete_rw_common(kiocb, res);
	io_put_req(req, NULL);
}

static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	io_complete_rw_common(kiocb, res);
	return io_put_req_find_next(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	req->result = res;
	if (res != -EAGAIN)
		req->flags |= REQ_F_IOPOLL_COMPLETED;
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->poll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
						list);
		if (list_req->rw.ki_filp != req->rw.ki_filp)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (req->flags & REQ_F_IOPOLL_COMPLETED)
		list_add(&req->list, &ctx->poll_list);
	else
		list_add_tail(&req->list, &ctx->poll_list);
}

static void io_file_put(struct io_submit_state *state)
{
	if (state->file) {
		int diff = state->has_refs - state->used_refs;

		if (diff)
			fput_many(state->file, diff);
		state->file = NULL;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file) {
		if (state->fd == fd) {
			state->used_refs++;
			state->ios_left--;
			return state->file;
		}
		io_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->has_refs = state->ios_left;
	state->used_refs = 1;
	state->ios_left--;
	return state->file;
}
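
/*
 * Illustrative worked example, not part of this file: with 8 IOs left in a
 * submission batch, the first IO against fd 5 takes all 8 references in one
 * fget_many(5, 8); the next seven IOs against fd 5 are served from the cache
 * with no further atomics. If only 3 of the 8 references end up used,
 * io_file_put() returns the surplus via fput_many(file, 8 - 3).
 */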

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode) || S_ISCHR(mode))
		return true;
	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
		return true;

	return false;
}

static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
		      bool force_nonblock)
{
	const struct io_uring_sqe *sqe = s->sqe;
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw;
	unsigned ioprio;
	int ret;

	if (!req->file)
		return -EBADF;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file)) {
		req->flags |= REQ_F_MUST_PUNT;
		return -EAGAIN;
	}

	kiocb->ki_pos = READ_ONCE(sqe->off);
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/* don't allow async punt if RWF_NOWAIT was requested */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    (req->file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
		       bool in_async)
{
	if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
		*nxt = __io_complete_rw(kiocb, ret);
	else
		io_rw_done(kiocb, ret);
}

static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
			   const struct io_uring_sqe *sqe,
			   struct iov_iter *iter)
{
	size_t len = READ_ONCE(sqe->len);
	struct io_mapped_ubuf *imu;
	unsigned index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = READ_ONCE(sqe->buf_index);
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = READ_ONCE(sqe->addr);

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be the start of the buffer; set the size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
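
/*
 * Illustrative worked example, not part of this file: a fixed buffer whose
 * first bvec holds 1K (unaligned start) followed by full 4K pages, with a
 * requested offset 10K into the buffer:
 *
 *	offset -= bv_len;		// 10K - 1K = 9K past bvec 0
 *	seg_skip = 1 + (9K >> 12);	// bvec 0 plus two full pages = 3
 *	iov_offset = 9K & ~PAGE_MASK;	// 1K into the fourth bvec
 *
 * which positions the iterator 10K in without walking every segment the
 * way iov_iter_advance() would.
 */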
1328
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001329static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1330 const struct sqe_submit *s, struct iovec **iovec,
1331 struct iov_iter *iter)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001332{
1333 const struct io_uring_sqe *sqe = s->sqe;
1334 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1335 size_t sqe_len = READ_ONCE(sqe->len);
Jens Axboeedafcce2019-01-09 09:16:05 -07001336 u8 opcode;
1337
1338 /*
1339 * We're reading ->opcode for the second time, but the first read
1340 * doesn't care whether it's _FIXED or not, so it doesn't matter
1341 * whether ->opcode changes concurrently. The first read does care
1342 * about whether it is a READ or a WRITE, so we don't trust this read
1343 * for that purpose and instead let the caller pass in the read/write
1344 * flag.
1345 */
1346 opcode = READ_ONCE(sqe->opcode);
1347 if (opcode == IORING_OP_READ_FIXED ||
1348 opcode == IORING_OP_WRITE_FIXED) {
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001349 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07001350 *iovec = NULL;
1351 return ret;
1352 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001353
1354 if (!s->has_user)
1355 return -EFAULT;
1356
1357#ifdef CONFIG_COMPAT
1358 if (ctx->compat)
1359 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1360 iovec, iter);
1361#endif
1362
1363 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1364}
1365
Jens Axboe32960612019-09-23 11:05:34 -06001366/*
1367 * For files that don't have ->read_iter() and ->write_iter(), handle them
1368 * by looping over ->read() or ->write() manually.
1369 */
1370static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
1371 struct iov_iter *iter)
1372{
1373 ssize_t ret = 0;
1374
1375 /*
1376 * Don't support polled IO through this interface, and we can't
1377 * support non-blocking either. For the latter, this just causes
1378 * the kiocb to be handled from an async context.
1379 */
1380 if (kiocb->ki_flags & IOCB_HIPRI)
1381 return -EOPNOTSUPP;
1382 if (kiocb->ki_flags & IOCB_NOWAIT)
1383 return -EAGAIN;
1384
1385 while (iov_iter_count(iter)) {
1386 struct iovec iovec = iov_iter_iovec(iter);
1387 ssize_t nr;
1388
1389 if (rw == READ) {
1390 nr = file->f_op->read(file, iovec.iov_base,
1391 iovec.iov_len, &kiocb->ki_pos);
1392 } else {
1393 nr = file->f_op->write(file, iovec.iov_base,
1394 iovec.iov_len, &kiocb->ki_pos);
1395 }
1396
1397 if (nr < 0) {
1398 if (!ret)
1399 ret = nr;
1400 break;
1401 }
1402 ret += nr;
1403 if (nr != iovec.iov_len)
1404 break;
1405 iov_iter_advance(iter, nr);
1406 }
1407
1408 return ret;
1409}
1410
static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
		   struct io_kiocb **nxt, bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	size_t iov_count;
	ssize_t read_size, ret;

	ret = io_prep_rw(req, s, force_nonblock);
	if (ret)
		return ret;
	file = kiocb->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;

	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
	if (ret < 0)
		return ret;

	read_size = ret;
	if (req->flags & REQ_F_LINK)
		req->result = read_size;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		if (file->f_op->read_iter)
			ret2 = call_read_iter(file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(READ, file, kiocb, &iter);

		/*
		 * In case of a short read, punt to async. This can happen
		 * if we have data partially cached. Alternatively we can
		 * return the short read, in which case the application will
		 * need to issue another SQE and wait for it. That SQE will
		 * need async punt anyway, so it's more efficient to do it
		 * here.
		 */
		if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
		    (req->flags & REQ_F_ISREG) &&
		    ret2 > 0 && ret2 < read_size)
			ret2 = -EAGAIN;
		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || ret2 != -EAGAIN)
			kiocb_done(kiocb, ret2, nxt, s->in_async);
		else
			ret = -EAGAIN;
	}
	kfree(iovec);
	return ret;
}

static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
		    struct io_kiocb **nxt, bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	size_t iov_count;
	ssize_t ret;

	ret = io_prep_rw(req, s, force_nonblock);
	if (ret)
		return ret;

	file = kiocb->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
	if (ret < 0)
		return ret;

	if (req->flags & REQ_F_LINK)
		req->result = ret;

	iov_count = iov_iter_count(&iter);

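	/*
	 * Buffered writes may block regardless of IOCB_NOWAIT, so under a
	 * forced non-blocking attempt only O_DIRECT writes are tried inline;
	 * everything else is punted to async context.
	 */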
	ret = -EAGAIN;
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
		goto out_free;

	ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw(). Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (req->flags & REQ_F_ISREG) {
			__sb_start_write(file_inode(file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;

		if (file->f_op->write_iter)
			ret2 = call_write_iter(file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
		if (!force_nonblock || ret2 != -EAGAIN)
			kiocb_done(kiocb, ret2, nxt, s->in_async);
		else
			ret = -EAGAIN;
	}
out_free:
	kfree(iovec);
	return ret;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, u64 user_data)
{
	struct io_ring_ctx *ctx = req->ctx;
	long err = 0;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	io_cqring_add_event(ctx, user_data, err);
	io_put_req(req, NULL);
	return 0;
}

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	return 0;
}

static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		    struct io_kiocb **nxt, bool force_nonblock)
{
	loff_t sqe_off = READ_ONCE(sqe->off);
	loff_t sqe_len = READ_ONCE(sqe->len);
	loff_t end = sqe_off + sqe_len;
	unsigned fsync_flags;
	int ret;

	fsync_flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	ret = io_prep_fsync(req, sqe);
	if (ret)
		return ret;

	/* fsync always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

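	/* a zero-length range (end == 0) means "sync to end of file" */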
	ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
				end > 0 ? end : LLONG_MAX,
				fsync_flags & IORING_FSYNC_DATASYNC);

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req, nxt);
	return 0;
}

static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret = 0;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	return ret;
}

static int io_sync_file_range(struct io_kiocb *req,
			      const struct io_uring_sqe *sqe,
			      struct io_kiocb **nxt,
			      bool force_nonblock)
{
	loff_t sqe_off;
	loff_t sqe_len;
	unsigned flags;
	int ret;

	ret = io_prep_sfr(req, sqe);
	if (ret)
		return ret;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	sqe_off = READ_ONCE(sqe->off);
	sqe_len = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->sync_range_flags);

	ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req, nxt);
	return 0;
}

#if defined(CONFIG_NET)
static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   struct io_kiocb **nxt, bool force_nonblock,
			   long (*fn)(struct socket *, struct user_msghdr __user *,
				      unsigned int))
{
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct user_msghdr __user *msg;
		unsigned flags;

		flags = READ_ONCE(sqe->msg_flags);
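		/*
		 * If the app asked for MSG_DONTWAIT itself, -EAGAIN goes
		 * straight back to userspace instead of triggering an async
		 * punt; otherwise force MSG_DONTWAIT for the inline attempt.
		 */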
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		msg = (struct user_msghdr __user *) (unsigned long)
			READ_ONCE(sqe->addr);

		ret = fn(sock, msg, flags);
		if (force_nonblock && ret == -EAGAIN)
			return ret;
	}

	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_put_req(req, nxt);
	return 0;
}
#endif

static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
	return io_send_recvmsg(req, sqe, nxt, force_nonblock,
				__sys_sendmsg_sock);
#else
	return -EOPNOTSUPP;
#endif
}

static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
	return io_send_recvmsg(req, sqe, nxt, force_nonblock,
				__sys_recvmsg_sock);
#else
	return -EOPNOTSUPP;
#endif
}

static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		     struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct sockaddr __user *addr;
	int __user *addr_len;
	unsigned file_flags;
	int flags, ret;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
		return -EINVAL;

	addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
	addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
	flags = READ_ONCE(sqe->accept_flags);
	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
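	/*
	 * accept may install a new fd, so the async worker needs access to
	 * this task's files; flag that before punting.
	 */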
	if (ret == -EAGAIN && force_nonblock) {
		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
		return -EAGAIN;
	}
	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static void io_poll_remove_one(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = &req->poll;

	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		io_queue_async_work(req->ctx, req);
	}
	spin_unlock(&poll->head->lock);

	list_del_init(&req->list);
}

static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->completion_lock);
	while (!list_empty(&ctx->cancel_list)) {
		req = list_first_entry(&ctx->cancel_list, struct io_kiocb, list);
		io_poll_remove_one(req);
	}
	spin_unlock_irq(&ctx->completion_lock);
}

/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *poll_req, *next;
	int ret = -ENOENT;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->poll_events)
		return -EINVAL;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
		if (READ_ONCE(sqe->addr) == poll_req->user_data) {
			io_poll_remove_one(poll_req);
			ret = 0;
			break;
		}
	}
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_put_req(req, NULL);
	return 0;
}

static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
			     __poll_t mask)
{
	req->poll.done = true;
	io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
	io_commit_cqring(ctx);
}

static void io_poll_complete_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_poll_iocb *poll = &req->poll;
	struct poll_table_struct pt = { ._key = poll->events };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt = NULL;
	__poll_t mask = 0;

	if (work->flags & IO_WQ_WORK_CANCEL)
		WRITE_ONCE(poll->canceled, true);

	if (!READ_ONCE(poll->canceled))
		mask = vfs_poll(poll->file, &pt) & poll->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
	 * synchronize with them. In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->completion_lock);
	if (!mask && !READ_ONCE(poll->canceled)) {
		add_wait_queue(poll->head, &poll->wait);
		spin_unlock_irq(&ctx->completion_lock);
		return;
	}
	list_del_init(&req->list);
	io_poll_complete(ctx, req, mask);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);

	io_put_req(req, &nxt);
	if (nxt)
		*workptr = &nxt->work;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
							wait);
	struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
	struct io_ring_ctx *ctx = req->ctx;
	__poll_t mask = key_to_poll(key);
	unsigned long flags;

	/* for instances that support it, check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;

	list_del_init(&poll->wait.entry);

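	/*
	 * Complete inline if we can take the completion lock from this
	 * (possibly hard-irq) context; otherwise punt the completion to
	 * the async workqueue.
	 */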
	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
		list_del(&req->list);
		io_poll_complete(ctx, req, mask);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);

		io_cqring_ev_posted(ctx);
		io_put_req(req, NULL);
	} else {
		io_queue_async_work(ctx, req);
	}

	return 1;
}

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int error;
};

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);

	if (unlikely(pt->req->poll.head)) {
		pt->error = -EINVAL;
		return;
	}

	pt->error = 0;
	pt->req->poll.head = head;
	add_wait_queue(head, &pt->req->poll.wait);
}

static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       struct io_kiocb **nxt)
{
	struct io_poll_iocb *poll = &req->poll;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_poll_table ipt;
	bool cancel = false;
	__poll_t mask;
	u16 events;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
		return -EINVAL;
	if (!poll->file)
		return -EBADF;

	req->submit.sqe = NULL;
	INIT_IO_WORK(&req->work, io_poll_complete_work);
	events = READ_ONCE(sqe->poll_events);
	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;

	poll->head = NULL;
	poll->done = false;
	poll->canceled = false;

	ipt.pt._qproc = io_poll_queue_proc;
	ipt.pt._key = poll->events;
	ipt.req = req;
	ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

	/* initialize the list so that we can do list_empty checks */
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);

	INIT_LIST_HEAD(&req->list);

	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;

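	/*
	 * vfs_poll() may already have fired the wakeup while we were arming
	 * the entry above, so re-examine the state under the lock before
	 * deciding whether to wait, cancel, or complete right away.
	 */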
	spin_lock_irq(&ctx->completion_lock);
	if (likely(poll->head)) {
		spin_lock(&poll->head->lock);
		if (unlikely(list_empty(&poll->wait.entry))) {
			if (ipt.error)
				cancel = true;
			ipt.error = 0;
			mask = 0;
		}
		if (mask || ipt.error)
			list_del_init(&poll->wait.entry);
		else if (cancel)
			WRITE_ONCE(poll->canceled, true);
		else if (!poll->done) /* actually waiting for an event */
			list_add_tail(&req->list, &ctx->cancel_list);
		spin_unlock(&poll->head->lock);
	}
	if (mask) { /* no async, we've stolen it */
		ipt.error = 0;
		io_poll_complete(ctx, req, mask);
	}
	spin_unlock_irq(&ctx->completion_lock);

	if (mask) {
		io_cqring_ev_posted(ctx);
		io_put_req(req, nxt);
	}
	return ipt.error;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_ring_ctx *ctx;
	struct io_kiocb *req;
	unsigned long flags;

	req = container_of(timer, struct io_kiocb, timeout.timer);
	ctx = req->ctx;
	atomic_inc(&ctx->cq_timeouts);

	spin_lock_irqsave(&ctx->completion_lock, flags);
	/*
	 * We could be racing with timeout deletion. If the list is empty,
	 * then timeout lookup already found it and will be handling it.
	 */
	if (!list_empty(&req->list)) {
		struct io_kiocb *prev;

		/*
		 * Adjust the sequence of the reqs before this one: this req
		 * consumes a cq_ring slot and advances the cq_tail pointer,
		 * so without the adjustment other timeout reqs could fire
		 * early, before enough completions have arrived for their
		 * wait_nr.
		 */
		prev = req;
		list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
			prev->sequence++;
		list_del_init(&req->list);
	}

	io_cqring_fill_event(ctx, req->user_data, -ETIME);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
	if (req->flags & REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
	io_put_req(req, NULL);
	return HRTIMER_NORESTART;
}

/*
 * Remove or update an existing timeout command
 */
static int io_timeout_remove(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *treq;
	int ret = -ENOENT;
	__u64 user_data;
	unsigned flags;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags)
		return -EINVAL;

	user_data = READ_ONCE(sqe->addr);
	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry(treq, &ctx->timeout_list, list) {
		if (user_data == treq->user_data) {
			list_del_init(&treq->list);
			ret = 0;
			break;
		}
	}

	/* didn't find timeout */
	if (ret) {
fill_ev:
		io_cqring_fill_event(ctx, req->user_data, ret);
		io_commit_cqring(ctx);
		spin_unlock_irq(&ctx->completion_lock);
		io_cqring_ev_posted(ctx);
		if (req->flags & REQ_F_LINK)
			req->flags |= REQ_F_FAIL_LINK;
		io_put_req(req, NULL);
		return 0;
	}

	ret = hrtimer_try_to_cancel(&treq->timeout.timer);
	if (ret == -1) {
		ret = -EBUSY;
		goto fill_ev;
	}

	io_cqring_fill_event(ctx, req->user_data, 0);
	io_cqring_fill_event(ctx, treq->user_data, -ECANCELED);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);

	io_put_req(treq, NULL);
	io_put_req(req, NULL);
	return 0;
}

static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned count;
	struct io_ring_ctx *ctx = req->ctx;
	struct list_head *entry;
	enum hrtimer_mode mode;
	struct timespec64 ts;
	unsigned span = 0;
	unsigned flags;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~IORING_TIMEOUT_ABS)
		return -EINVAL;

	if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (flags & IORING_TIMEOUT_ABS)
		mode = HRTIMER_MODE_ABS;
	else
		mode = HRTIMER_MODE_REL;

	hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);

	/*
	 * sqe->off holds how many events need to occur for this timeout
	 * to be satisfied.
	 */
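	/*
	 * e.g. with sqe->off == 8, this request completes once 8 other
	 * completions have been posted, or with -ETIME when the timer
	 * expires, whichever happens first.
	 */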
	count = READ_ONCE(sqe->off);
	if (!count)
		count = 1;

	req->sequence = ctx->cached_sq_head + count - 1;
	/* reuse it to store the count */
	req->submit.sequence = count;
	req->flags |= REQ_F_TIMEOUT;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	spin_lock_irq(&ctx->completion_lock);
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
		unsigned nxt_sq_head;
		long long tmp, tmp_nxt;

		/*
		 * Since cached_sq_head + count - 1 can overflow, use type long
		 * long to store it.
		 */
		tmp = (long long)ctx->cached_sq_head + count - 1;
		nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
		tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;

		/*
		 * cached_sq_head may overflow, but it can never have wrapped
		 * twice while any timeout req is still valid.
		 */
		if (ctx->cached_sq_head < nxt_sq_head)
			tmp += UINT_MAX;

		if (tmp > tmp_nxt)
			break;

		/*
		 * The sequence of the reqs after the inserted one, and of the
		 * inserted one itself, must be adjusted because each timeout
		 * req consumes a slot.
		 */
		span++;
		nxt->sequence++;
	}
	req->sequence -= span;
	list_add(&req->list, entry);
	req->timeout.timer.function = io_timeout_fn;
	hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
	spin_unlock_irq(&ctx->completion_lock);
	return 0;
}

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->user_data == (unsigned long) data;
}

static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   struct io_kiocb **nxt)
{
	struct io_ring_ctx *ctx = req->ctx;
	enum io_wq_cancel cancel_ret;
	void *sqe_addr;
	int ret = 0;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
	    sqe->cancel_flags)
		return -EINVAL;

	sqe_addr = (void *) (unsigned long) READ_ONCE(sqe->addr);
	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req, nxt);
	return 0;
}

static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
			const struct io_uring_sqe *sqe)
{
	struct io_uring_sqe *sqe_copy;

	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
		return 0;

	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
	if (!sqe_copy)
		return -EAGAIN;

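	/*
	 * The unlocked check above may have raced with the defer list being
	 * flushed; re-check under the completion lock and run the request
	 * inline if deferral is no longer required.
	 */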
	spin_lock_irq(&ctx->completion_lock);
	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
		spin_unlock_irq(&ctx->completion_lock);
		kfree(sqe_copy);
		return 0;
	}

	memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
	req->submit.sqe = sqe_copy;

	trace_io_uring_defer(ctx, req, false);
	list_add_tail(&req->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);
	return -EIOCBQUEUED;
}

static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			   const struct sqe_submit *s, struct io_kiocb **nxt,
			   bool force_nonblock)
{
	int ret, opcode;

	req->user_data = READ_ONCE(s->sqe->user_data);

	opcode = READ_ONCE(s->sqe->opcode);
	switch (opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req, req->user_data);
		break;
	case IORING_OP_READV:
		if (unlikely(s->sqe->buf_index))
			return -EINVAL;
		ret = io_read(req, s, nxt, force_nonblock);
		break;
	case IORING_OP_WRITEV:
		if (unlikely(s->sqe->buf_index))
			return -EINVAL;
		ret = io_write(req, s, nxt, force_nonblock);
		break;
	case IORING_OP_READ_FIXED:
		ret = io_read(req, s, nxt, force_nonblock);
		break;
	case IORING_OP_WRITE_FIXED:
		ret = io_write(req, s, nxt, force_nonblock);
		break;
	case IORING_OP_FSYNC:
		ret = io_fsync(req, s->sqe, nxt, force_nonblock);
		break;
	case IORING_OP_POLL_ADD:
		ret = io_poll_add(req, s->sqe, nxt);
		break;
	case IORING_OP_POLL_REMOVE:
		ret = io_poll_remove(req, s->sqe);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
		break;
	case IORING_OP_SENDMSG:
		ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
		break;
	case IORING_OP_RECVMSG:
		ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
		break;
	case IORING_OP_TIMEOUT:
		ret = io_timeout(req, s->sqe);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		ret = io_timeout_remove(req, s->sqe);
		break;
	case IORING_OP_ACCEPT:
		ret = io_accept(req, s->sqe, nxt, force_nonblock);
		break;
	case IORING_OP_ASYNC_CANCEL:
		ret = io_async_cancel(req, s->sqe, nxt);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (req->result == -EAGAIN)
			return -EAGAIN;

		/* workqueue context doesn't hold uring_lock, grab it now */
		if (s->in_async)
			mutex_lock(&ctx->uring_lock);
		io_iopoll_req_issued(req);
		if (s->in_async)
			mutex_unlock(&ctx->uring_lock);
	}

	return 0;
}

static void io_wq_submit_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_ring_ctx *ctx = req->ctx;
	struct sqe_submit *s = &req->submit;
	const struct io_uring_sqe *sqe = s->sqe;
	struct io_kiocb *nxt = NULL;
	int ret = 0;

	/* Ensure we clear previously set non-block flag */
	req->rw.ki_flags &= ~IOCB_NOWAIT;

	if (work->flags & IO_WQ_WORK_CANCEL)
		ret = -ECANCELED;

	if (!ret) {
		s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
		s->in_async = true;
		do {
			ret = __io_submit_sqe(ctx, req, s, &nxt, false);
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
			 * wait for request slots on the block side.
			 */
			if (ret != -EAGAIN)
				break;
			cond_resched();
		} while (1);
	}

	/* drop submission reference */
	io_put_req(req, NULL);

	if (ret) {
		if (req->flags & REQ_F_LINK)
			req->flags |= REQ_F_FAIL_LINK;
		io_cqring_add_event(ctx, sqe->user_data, ret);
		io_put_req(req, NULL);
	}

	/* async context always uses a copy of the sqe */
	kfree(sqe);

	/* if a dependent link is ready, pass it back */
	if (!ret && nxt) {
		io_prep_async_work(nxt);
		*workptr = &nxt->work;
	}
}

static bool io_op_needs_file(const struct io_uring_sqe *sqe)
{
	int op = READ_ONCE(sqe->opcode);

	switch (op) {
	case IORING_OP_NOP:
	case IORING_OP_POLL_REMOVE:
		return false;
	default:
		return true;
	}
}

static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
	struct fixed_file_table *table;

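	/*
	 * The fixed file set is a two-level table: the upper bits of the
	 * index select a table page, the lower bits the slot within it.
	 */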
	table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
	return table->files[index & IORING_FILE_TABLE_MASK];
}

static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
			   struct io_submit_state *state, struct io_kiocb *req)
{
	unsigned flags;
	int fd;

	flags = READ_ONCE(s->sqe->flags);
	fd = READ_ONCE(s->sqe->fd);

	if (flags & IOSQE_IO_DRAIN)
		req->flags |= REQ_F_IO_DRAIN;
	/*
	 * All io needs to record the previous position; if LINK or DRAIN
	 * is set, it can be used to mark the position of the first IO in
	 * the link list.
	 */
	req->sequence = s->sequence;

	if (!io_op_needs_file(s->sqe))
		return 0;

	if (flags & IOSQE_FIXED_FILE) {
		if (unlikely(!ctx->file_table ||
		    (unsigned) fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		req->file = io_file_from_index(ctx, fd);
		if (!req->file)
			return -EBADF;
		req->flags |= REQ_F_FIXED_FILE;
	} else {
		if (s->needs_fixed_file)
			return -EBADF;
		trace_io_uring_file_get(ctx, fd);
		req->file = io_file_get(state, fd);
		if (unlikely(!req->file))
			return -EBADF;
	}

	return 0;
}

static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	int ret = -EBADF;

	rcu_read_lock();
	spin_lock_irq(&ctx->inflight_lock);
	/*
	 * We use the f_ops->flush() handler to ensure that we can flush
	 * out work accessing these files if the fd is closed. Check if
	 * the fd has changed since we started down this path, and disallow
	 * this operation if it has.
	 */
	if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
		list_add(&req->inflight_entry, &ctx->inflight_list);
		req->flags |= REQ_F_INFLIGHT;
		req->work.files = current->files;
		ret = 0;
	}
	spin_unlock_irq(&ctx->inflight_lock);
	rcu_read_unlock();

	return ret;
}

static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			  struct sqe_submit *s)
{
	int ret;

	ret = __io_submit_sqe(ctx, req, s, NULL, true);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
	    (req->flags & REQ_F_MUST_PUNT))) {
		struct io_uring_sqe *sqe_copy;

		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
		if (sqe_copy) {
			s->sqe = sqe_copy;
			memcpy(&req->submit, s, sizeof(*s));
			if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
				ret = io_grab_files(ctx, req);
				if (ret) {
					kfree(sqe_copy);
					goto err;
				}
			}

			/*
			 * Queued up for async execution, worker will release
			 * submit reference when the iocb is actually submitted.
			 */
			io_queue_async_work(ctx, req);
			return 0;
		}
	}

	/* drop submission reference */
err:
	io_put_req(req, NULL);

	/* and drop final reference, if we failed */
	if (ret) {
		io_cqring_add_event(ctx, req->user_data, ret);
		if (req->flags & REQ_F_LINK)
			req->flags |= REQ_F_FAIL_LINK;
		io_put_req(req, NULL);
	}

	return ret;
}

static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			struct sqe_submit *s)
{
	int ret;

	ret = io_req_defer(ctx, req, s->sqe);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
			io_free_req(req, NULL);
			io_cqring_add_event(ctx, s->sqe->user_data, ret);
		}
		return 0;
	}

	return __io_queue_sqe(ctx, req, s);
}

static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
			      struct sqe_submit *s, struct io_kiocb *shadow)
{
	int ret;
	int need_submit = false;

	if (!shadow)
		return io_queue_sqe(ctx, req, s);

	/*
	 * Mark the first IO in the link list as DRAIN so that all following
	 * IOs enter the defer list; all prior IO must complete before the
	 * link list can run.
	 */
	req->flags |= REQ_F_IO_DRAIN;
	ret = io_req_defer(ctx, req, s->sqe);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
			io_free_req(req, NULL);
			__io_free_req(shadow);
			io_cqring_add_event(ctx, s->sqe->user_data, ret);
			return 0;
		}
	} else {
		/*
		 * A return of 0 means all IOs in front of the link have
		 * completed, so queue the link head now.
		 */
		need_submit = true;
	}

	/* Insert shadow req to defer_list, blocking next IOs */
	spin_lock_irq(&ctx->completion_lock);
	trace_io_uring_defer(ctx, shadow, true);
	list_add_tail(&shadow->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);

	if (need_submit)
		return __io_queue_sqe(ctx, req, s);

	return 0;
}

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)

static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
			  struct io_submit_state *state, struct io_kiocb **link)
{
	struct io_uring_sqe *sqe_copy;
	struct io_kiocb *req;
	int ret;

	/* enforce forwards compatibility on users */
	if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
		ret = -EINVAL;
		goto err;
	}

	req = io_get_req(ctx, state);
	if (unlikely(!req)) {
		ret = -EAGAIN;
		goto err;
	}

	ret = io_req_set_file(ctx, s, state, req);
	if (unlikely(ret)) {
err_req:
		io_free_req(req, NULL);
err:
		io_cqring_add_event(ctx, s->sqe->user_data, ret);
		return;
	}

	req->user_data = s->sqe->user_data;

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (*link) {
		struct io_kiocb *prev = *link;

		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
		if (!sqe_copy) {
			ret = -EAGAIN;
			goto err_req;
		}

		s->sqe = sqe_copy;
		memcpy(&req->submit, s, sizeof(*s));
		trace_io_uring_link(ctx, req, prev);
		list_add_tail(&req->list, &prev->link_list);
	} else if (s->sqe->flags & IOSQE_IO_LINK) {
		req->flags |= REQ_F_LINK;

		memcpy(&req->submit, s, sizeof(*s));
		INIT_LIST_HEAD(&req->link_list);
		*link = req;
	} else {
		io_queue_sqe(ctx, req, s);
	}
}

/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state)
{
	blk_finish_plug(&state->plug);
	io_file_put(state);
	if (state->free_reqs)
		kmem_cache_free_bulk(req_cachep, state->free_reqs,
					&state->reqs[state->cur_req]);
}

/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  struct io_ring_ctx *ctx, unsigned max_ios)
{
	blk_start_plug(&state->plug);
	state->free_reqs = 0;
	state->file = NULL;
	state->ios_left = max_ios;
}

static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
		/*
		 * Ensure any loads from the SQEs are done at this point,
		 * since once we write the new head, the application could
		 * write new data to them.
		 */
		smp_store_release(&rings->sq.head, ctx->cached_sq_head);
	}
}

/*
 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
{
	struct io_rings *rings = ctx->rings;
	u32 *sq_array = ctx->sq_array;
	unsigned head;

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */
	head = ctx->cached_sq_head;
	/* make sure SQ entry isn't read before tail */
	if (head == smp_load_acquire(&rings->sq.tail))
		return false;

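	/*
	 * The SQ ring holds indices into the SQE array; this indirection
	 * lets the application fill SQE slots out of order or reuse them
	 * before the ring wraps.
	 */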
	head = READ_ONCE(sq_array[head & ctx->sq_mask]);
	if (head < ctx->sq_entries) {
		s->ring_file = NULL;
		s->sqe = &ctx->sq_sqes[head];
		s->sequence = ctx->cached_sq_head;
		ctx->cached_sq_head++;
		return true;
	}

	/* drop invalid entries */
	ctx->cached_sq_head++;
	ctx->cached_sq_dropped++;
	WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
	return false;
}

static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
			  struct mm_struct **mm)
{
	struct io_submit_state state, *statep = NULL;
	struct io_kiocb *link = NULL;
	struct io_kiocb *shadow_req = NULL;
	bool prev_was_link = false;
	int i, submitted = 0;
	bool mm_fault = false;

	if (nr > IO_PLUG_THRESHOLD) {
		io_submit_state_start(&state, ctx, nr);
		statep = &state;
	}

	for (i = 0; i < nr; i++) {
		struct sqe_submit s;

		if (!io_get_sqring(ctx, &s))
			break;

		if (io_sqe_needs_user(s.sqe) && !*mm) {
			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
			if (!mm_fault) {
				use_mm(ctx->sqo_mm);
				*mm = ctx->sqo_mm;
			}
		}

		/*
		 * If previous wasn't linked and we have a linked command,
		 * that's the end of the chain. Submit the previous link.
		 */
		if (!prev_was_link && link) {
			io_queue_link_head(ctx, link, &link->submit, shadow_req);
			link = NULL;
			shadow_req = NULL;
		}
		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;

		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
			if (!shadow_req) {
				shadow_req = io_get_req(ctx, NULL);
				if (unlikely(!shadow_req))
					goto out;
				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
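				/*
				 * The shadow req never goes through normal
				 * submission, so drop the submit reference
				 * it was created with up front.
				 */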
				refcount_dec(&shadow_req->refs);
			}
			shadow_req->sequence = s.sequence;
		}

out:
		s.has_user = *mm != NULL;
		s.in_async = true;
		s.needs_fixed_file = true;
		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, true);
		io_submit_sqe(ctx, &s, statep, &link);
		submitted++;
	}

	if (link)
		io_queue_link_head(ctx, link, &link->submit, shadow_req);
	if (statep)
		io_submit_state_end(&state);

	return submitted;
}

2766static int io_sq_thread(void *data)
2767{
Jens Axboe6c271ce2019-01-10 11:22:30 -07002768 struct io_ring_ctx *ctx = data;
2769 struct mm_struct *cur_mm = NULL;
2770 mm_segment_t old_fs;
2771 DEFINE_WAIT(wait);
2772 unsigned inflight;
2773 unsigned long timeout;
2774
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002775 complete(&ctx->sqo_thread_started);
2776
Jens Axboe6c271ce2019-01-10 11:22:30 -07002777 old_fs = get_fs();
2778 set_fs(USER_DS);
2779
2780 timeout = inflight = 0;
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002781 while (!kthread_should_park()) {
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002782 unsigned int to_submit;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002783
2784 if (inflight) {
2785 unsigned nr_events = 0;
2786
2787 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe2b2ed972019-10-25 10:06:15 -06002788 /*
2789 * inflight is the count of the maximum possible
2790 * entries we submitted, but it can be smaller
2791 * if we dropped some of them. If we don't have
2792 * poll entries available, then we know that we
2793 * have nothing left to poll for. Reset the
2794 * inflight count to zero in that case.
2795 */
2796 mutex_lock(&ctx->uring_lock);
2797 if (!list_empty(&ctx->poll_list))
2798 __io_iopoll_check(ctx, &nr_events, 0);
2799 else
2800 inflight = 0;
2801 mutex_unlock(&ctx->uring_lock);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002802 } else {
2803 /*
2804 * Normal IO, just pretend everything completed.
2805 * We don't have to poll completions for that.
2806 */
2807 nr_events = inflight;
2808 }
2809
2810 inflight -= nr_events;
2811 if (!inflight)
2812 timeout = jiffies + ctx->sq_thread_idle;
2813 }
2814
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002815 to_submit = io_sqring_entries(ctx);
2816 if (!to_submit) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002817 /*
2818 * We're polling. If we're within the defined idle
2819 * period, then let us spin without work before going
2820 * to sleep.
2821 */
2822 if (inflight || !time_after(jiffies, timeout)) {
Jens Axboe9831a902019-09-19 09:48:55 -06002823 cond_resched();
Jens Axboe6c271ce2019-01-10 11:22:30 -07002824 continue;
2825 }
2826
2827 /*
2828 * Drop cur_mm before scheduling, we can't hold it for
2829 * long periods (or over schedule()). Do this before
2830 * adding ourselves to the waitqueue, as the unuse/drop
2831 * may sleep.
2832 */
2833 if (cur_mm) {
2834 unuse_mm(cur_mm);
2835 mmput(cur_mm);
2836 cur_mm = NULL;
2837 }
2838
2839 prepare_to_wait(&ctx->sqo_wait, &wait,
2840 TASK_INTERRUPTIBLE);
2841
2842 /* Tell userspace we may need a wakeup call */
Hristo Venev75b28af2019-08-26 17:23:46 +00002843 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
Stefan Bühler0d7bae62019-04-19 11:57:45 +02002844 /* make sure to read SQ tail after writing flags */
2845 smp_mb();
Jens Axboe6c271ce2019-01-10 11:22:30 -07002846
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002847 to_submit = io_sqring_entries(ctx);
2848 if (!to_submit) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002849 if (kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002850 finish_wait(&ctx->sqo_wait, &wait);
2851 break;
2852 }
2853 if (signal_pending(current))
2854 flush_signals(current);
2855 schedule();
2856 finish_wait(&ctx->sqo_wait, &wait);
2857
Hristo Venev75b28af2019-08-26 17:23:46 +00002858 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002859 continue;
2860 }
2861 finish_wait(&ctx->sqo_wait, &wait);
2862
Hristo Venev75b28af2019-08-26 17:23:46 +00002863 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002864 }
2865
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002866 to_submit = min(to_submit, ctx->sq_entries);
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03002867 inflight += io_submit_sqes(ctx, to_submit, &cur_mm);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002868
2869 /* Commit SQ ring head once we've consumed all SQEs */
2870 io_commit_sqring(ctx);
2871 }
2872
2873 set_fs(old_fs);
2874 if (cur_mm) {
2875 unuse_mm(cur_mm);
2876 mmput(cur_mm);
2877 }
Jens Axboe06058632019-04-13 09:26:03 -06002878
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002879 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06002880
Jens Axboe6c271ce2019-01-10 11:22:30 -07002881 return 0;
2882}
2883
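/*
 * Submit up to 'to_submit' SQEs on behalf of the caller of
 * io_uring_enter(). Runs in process context, so has_user is always true
 * and fixed files aren't required, unlike the SQPOLL path above.
 */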
Jens Axboefcb323c2019-10-24 12:39:47 -06002884static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
2885 struct file *ring_file, int ring_fd)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002886{
Jens Axboe9a56a232019-01-09 09:06:50 -07002887 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002888 struct io_kiocb *link = NULL;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002889 struct io_kiocb *shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002890 bool prev_was_link = false;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002891 int i, submit = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002892
Jens Axboe9a56a232019-01-09 09:06:50 -07002893 if (to_submit > IO_PLUG_THRESHOLD) {
2894 io_submit_state_start(&state, ctx, to_submit);
2895 statep = &state;
2896 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002897
2898 for (i = 0; i < to_submit; i++) {
2899 struct sqe_submit s;
2900
2901 if (!io_get_sqring(ctx, &s))
2902 break;
2903
Jens Axboe9e645e112019-05-10 16:07:28 -06002904 /*
2905 * If the previous request wasn't linked and we have a pending
2906 * link chain, the chain has ended. Submit the queued link head.
2907 */
2908 if (!prev_was_link && link) {
Jens Axboebc808bc2019-10-22 13:14:37 -06002909 io_queue_link_head(ctx, link, &link->submit, shadow_req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002910 link = NULL;
Jackie Liu5f5ad9c2019-09-18 10:37:53 +08002911 shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002912 }
2913 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2914
Jackie Liu4fe2c962019-09-09 20:50:40 +08002915 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2916 if (!shadow_req) {
2917 shadow_req = io_get_req(ctx, NULL);
Jackie Liua1041c22019-09-18 17:25:52 +08002918 if (unlikely(!shadow_req))
2919 goto out;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002920 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2921 refcount_dec(&shadow_req->refs);
2922 }
2923 shadow_req->sequence = s.sequence;
2924 }
2925
Jackie Liua1041c22019-09-18 17:25:52 +08002926out:
Jens Axboefcb323c2019-10-24 12:39:47 -06002927 s.ring_file = ring_file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002928 s.has_user = true;
Jackie Liuba5290c2019-10-09 09:19:59 +08002929 s.in_async = false;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002930 s.needs_fixed_file = false;
Jens Axboefcb323c2019-10-24 12:39:47 -06002931 s.ring_fd = ring_fd;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002932 submit++;
Jens Axboe51c3ff62019-11-03 06:52:50 -07002933 trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, false);
Jens Axboebc808bc2019-10-22 13:14:37 -06002934 io_submit_sqe(ctx, &s, statep, &link);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002935 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002936
Jens Axboe9e645e112019-05-10 16:07:28 -06002937 if (link)
Jens Axboebc808bc2019-10-22 13:14:37 -06002938 io_queue_link_head(ctx, link, &link->submit, shadow_req);
Jens Axboe9a56a232019-01-09 09:06:50 -07002939 if (statep)
2940 io_submit_state_end(statep);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002941
Pavel Begunkov935d1e42019-10-25 12:31:31 +03002942 io_commit_sqring(ctx);
2943
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002944 return submit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002945}
2946
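/*
 * Wait-queue entry private to io_cqring_wait(), bundling the wait
 * condition (target event count and the timeout generation observed
 * when the wait started) with the queue entry itself.
 */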
Jens Axboebda52162019-09-24 13:47:15 -06002947struct io_wait_queue {
2948 struct wait_queue_entry wq;
2949 struct io_ring_ctx *ctx;
2950 unsigned to_wait;
2951 unsigned nr_timeouts;
2952};
2953
2954static inline bool io_should_wake(struct io_wait_queue *iowq)
2955{
2956 struct io_ring_ctx *ctx = iowq->ctx;
2957
2958 /*
2959 * Wake up if we have enough events, or if a timeout occurred since we
2960 * started waiting. For timeouts, we always want to return to userspace,
2961 * regardless of event count.
2962 */
2963 return io_cqring_events(ctx->rings) >= iowq->to_wait ||
2964 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2965}
2966
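/*
 * Wake callback for the CQ waitqueue: don't actually wake the task
 * until io_should_wake() says the wait condition holds, so the waiter
 * isn't bounced awake for every single completion.
 */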
2967static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2968 int wake_flags, void *key)
2969{
2970 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2971 wq);
2972
2973 if (!io_should_wake(iowq))
2974 return -1;
2975
2976 return autoremove_wake_function(curr, mode, wake_flags, key);
2977}
2978
Jens Axboe2b188cc2019-01-07 10:46:33 -07002979/*
2980 * Wait until events become available, if we don't already have some. The
2981 * application must reap them itself, as they reside on the shared cq ring.
2982 */
2983static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2984 const sigset_t __user *sig, size_t sigsz)
2985{
Jens Axboebda52162019-09-24 13:47:15 -06002986 struct io_wait_queue iowq = {
2987 .wq = {
2988 .private = current,
2989 .func = io_wake_function,
2990 .entry = LIST_HEAD_INIT(iowq.wq.entry),
2991 },
2992 .ctx = ctx,
2993 .to_wait = min_events,
2994 };
Hristo Venev75b28af2019-08-26 17:23:46 +00002995 struct io_rings *rings = ctx->rings;
Jackie Liue9ffa5c2019-10-29 11:16:42 +08002996 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002997
Hristo Venev75b28af2019-08-26 17:23:46 +00002998 if (io_cqring_events(rings) >= min_events)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002999 return 0;
3000
3001 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01003002#ifdef CONFIG_COMPAT
3003 if (in_compat_syscall())
3004 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07003005 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01003006 else
3007#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07003008 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01003009
Jens Axboe2b188cc2019-01-07 10:46:33 -07003010 if (ret)
3011 return ret;
3012 }
3013
Jens Axboebda52162019-09-24 13:47:15 -06003014 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02003015 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06003016 do {
3017 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
3018 TASK_INTERRUPTIBLE);
3019 if (io_should_wake(&iowq))
3020 break;
3021 schedule();
3022 if (signal_pending(current)) {
Jackie Liue9ffa5c2019-10-29 11:16:42 +08003023 ret = -EINTR;
Jens Axboebda52162019-09-24 13:47:15 -06003024 break;
3025 }
3026 } while (1);
3027 finish_wait(&ctx->wait, &iowq.wq);
3028
Jackie Liue9ffa5c2019-10-29 11:16:42 +08003029 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003030
Hristo Venev75b28af2019-08-26 17:23:46 +00003031 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003032}
3033
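/*
 * Drop the references held on registered files. With CONFIG_UNIX the
 * references are owned by SCM_RIGHTS skbs queued on the ring socket, so
 * freeing those skbs releases the files; otherwise each table entry is
 * fput() directly.
 */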
Jens Axboe6b063142019-01-10 22:13:58 -07003034static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
3035{
3036#if defined(CONFIG_UNIX)
3037 if (ctx->ring_sock) {
3038 struct sock *sock = ctx->ring_sock->sk;
3039 struct sk_buff *skb;
3040
3041 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
3042 kfree_skb(skb);
3043 }
3044#else
3045 int i;
3046
Jens Axboe65e19f52019-10-26 07:20:21 -06003047 for (i = 0; i < ctx->nr_user_files; i++) {
3048 struct file *file;
3049
3050 file = io_file_from_index(ctx, i);
3051 if (file)
3052 fput(file);
3053 }
Jens Axboe6b063142019-01-10 22:13:58 -07003054#endif
3055}
3056
3057static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
3058{
Jens Axboe65e19f52019-10-26 07:20:21 -06003059 unsigned nr_tables, i;
3060
3061 if (!ctx->file_table)
Jens Axboe6b063142019-01-10 22:13:58 -07003062 return -ENXIO;
3063
3064 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06003065 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
3066 for (i = 0; i < nr_tables; i++)
3067 kfree(ctx->file_table[i].files);
3068 kfree(ctx->file_table);
3069 ctx->file_table = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07003070 ctx->nr_user_files = 0;
3071 return 0;
3072}
3073
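/*
 * Stop the SQPOLL thread, waiting for it to signal startup first so
 * park/stop cannot race with a thread that hasn't begun running yet.
 */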
Jens Axboe6c271ce2019-01-10 11:22:30 -07003074static void io_sq_thread_stop(struct io_ring_ctx *ctx)
3075{
3076 if (ctx->sqo_thread) {
Jackie Liua4c0b3d2019-07-08 13:41:12 +08003077 wait_for_completion(&ctx->sqo_thread_started);
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02003078 /*
3079 * The park is a bit of a work-around; without it we get
3080 * warning spews on shutdown with SQPOLL set and affinity
3081 * set to a single CPU.
3082 */
Jens Axboe06058632019-04-13 09:26:03 -06003083 kthread_park(ctx->sqo_thread);
Jens Axboe6c271ce2019-01-10 11:22:30 -07003084 kthread_stop(ctx->sqo_thread);
3085 ctx->sqo_thread = NULL;
3086 }
3087}
3088
Jens Axboe6b063142019-01-10 22:13:58 -07003089static void io_finish_async(struct io_ring_ctx *ctx)
3090{
Jens Axboe6c271ce2019-01-10 11:22:30 -07003091 io_sq_thread_stop(ctx);
3092
Jens Axboe561fb042019-10-24 07:25:42 -06003093 if (ctx->io_wq) {
3094 io_wq_destroy(ctx->io_wq);
3095 ctx->io_wq = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07003096 }
3097}
3098
3099#if defined(CONFIG_UNIX)
3100static void io_destruct_skb(struct sk_buff *skb)
3101{
3102 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
3103
Jens Axboe561fb042019-10-24 07:25:42 -06003104 if (ctx->io_wq)
3105 io_wq_flush(ctx->io_wq);
Jens Axboe8a997342019-10-09 14:40:13 -06003106
Jens Axboe6b063142019-01-10 22:13:58 -07003107 unix_destruct_scm(skb);
3108}
3109
3110/*
3111 * Ensure the UNIX gc is aware of our file set, so we are certain that
3112 * the io_uring can be safely unregistered on process exit, even if we have
3113 * loops in the file referencing.
3114 */
3115static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
3116{
3117 struct sock *sk = ctx->ring_sock->sk;
3118 struct scm_fp_list *fpl;
3119 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06003120 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07003121
3122 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
3123 unsigned long inflight = ctx->user->unix_inflight + nr;
3124
3125 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
3126 return -EMFILE;
3127 }
3128
3129 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
3130 if (!fpl)
3131 return -ENOMEM;
3132
3133 skb = alloc_skb(0, GFP_KERNEL);
3134 if (!skb) {
3135 kfree(fpl);
3136 return -ENOMEM;
3137 }
3138
3139 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07003140
Jens Axboe08a45172019-10-03 08:11:03 -06003141 nr_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07003142 fpl->user = get_uid(ctx->user);
3143 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003144 struct file *file = io_file_from_index(ctx, i + offset);
3145
3146 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06003147 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06003148 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06003149 unix_inflight(fpl->user, fpl->fp[nr_files]);
3150 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07003151 }
3152
Jens Axboe08a45172019-10-03 08:11:03 -06003153 if (nr_files) {
3154 fpl->max = SCM_MAX_FD;
3155 fpl->count = nr_files;
3156 UNIXCB(skb).fp = fpl;
3157 skb->destructor = io_destruct_skb;
3158 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
3159 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07003160
Jens Axboe08a45172019-10-03 08:11:03 -06003161 for (i = 0; i < nr_files; i++)
3162 fput(fpl->fp[i]);
3163 } else {
3164 kfree_skb(skb);
3165 kfree(fpl);
3166 }
Jens Axboe6b063142019-01-10 22:13:58 -07003167
3168 return 0;
3169}
3170
3171/*
3172 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
3173 * causes regular reference counting to break down. We rely on the UNIX
3174 * garbage collection to take care of this problem for us.
3175 */
3176static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3177{
3178 unsigned left, total;
3179 int ret = 0;
3180
3181 total = 0;
3182 left = ctx->nr_user_files;
3183 while (left) {
3184 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07003185
3186 ret = __io_sqe_files_scm(ctx, this_files, total);
3187 if (ret)
3188 break;
3189 left -= this_files;
3190 total += this_files;
3191 }
3192
3193 if (!ret)
3194 return 0;
3195
3196 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003197 struct file *file = io_file_from_index(ctx, total);
3198
3199 if (file)
3200 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07003201 total++;
3202 }
3203
3204 return ret;
3205}
3206#else
3207static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3208{
3209 return 0;
3210}
3211#endif
3212
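/*
 * The fixed file set is sharded into tables of up to
 * IORING_MAX_FILES_TABLE entries, avoiding one huge allocation for
 * large sets. Returns 0 on success, non-zero if allocation failed.
 */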
Jens Axboe65e19f52019-10-26 07:20:21 -06003213static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
3214 unsigned nr_files)
3215{
3216 int i;
3217
3218 for (i = 0; i < nr_tables; i++) {
3219 struct fixed_file_table *table = &ctx->file_table[i];
3220 unsigned this_files;
3221
3222 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
3223 table->files = kcalloc(this_files, sizeof(struct file *),
3224 GFP_KERNEL);
3225 if (!table->files)
3226 break;
3227 nr_files -= this_files;
3228 }
3229
3230 if (i == nr_tables)
3231 return 0;
3232
3233 for (i = 0; i < nr_tables; i++) {
3234 struct fixed_file_table *table = &ctx->file_table[i];
3235 kfree(table->files);
3236 }
3237 return 1;
3238}
3239
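/*
 * Register a fixed file set from a user array of fds. Sparse entries
 * (fd == -1) are allowed; io_uring fds themselves are rejected, since a
 * ring holding a reference to itself could otherwise never be freed.
 */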
Jens Axboe6b063142019-01-10 22:13:58 -07003240static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
3241 unsigned nr_args)
3242{
3243 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe65e19f52019-10-26 07:20:21 -06003244 unsigned nr_tables;
Jens Axboe6b063142019-01-10 22:13:58 -07003245 int fd, ret = 0;
3246 unsigned i;
3247
Jens Axboe65e19f52019-10-26 07:20:21 -06003248 if (ctx->file_table)
Jens Axboe6b063142019-01-10 22:13:58 -07003249 return -EBUSY;
3250 if (!nr_args)
3251 return -EINVAL;
3252 if (nr_args > IORING_MAX_FIXED_FILES)
3253 return -EMFILE;
3254
Jens Axboe65e19f52019-10-26 07:20:21 -06003255 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
3256 ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
3257 GFP_KERNEL);
3258 if (!ctx->file_table)
Jens Axboe6b063142019-01-10 22:13:58 -07003259 return -ENOMEM;
3260
Jens Axboe65e19f52019-10-26 07:20:21 -06003261 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
3262 kfree(ctx->file_table);
3263 return -ENOMEM;
3264 }
3265
Jens Axboe08a45172019-10-03 08:11:03 -06003266 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003267 struct fixed_file_table *table;
3268 unsigned index;
3269
Jens Axboe6b063142019-01-10 22:13:58 -07003270 ret = -EFAULT;
3271 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3272 break;
Jens Axboe08a45172019-10-03 08:11:03 -06003273 /* allow sparse sets */
3274 if (fd == -1) {
3275 ret = 0;
3276 continue;
3277 }
Jens Axboe6b063142019-01-10 22:13:58 -07003278
Jens Axboe65e19f52019-10-26 07:20:21 -06003279 table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
3280 index = i & IORING_FILE_TABLE_MASK;
3281 table->files[index] = fget(fd);
Jens Axboe6b063142019-01-10 22:13:58 -07003282
3283 ret = -EBADF;
Jens Axboe65e19f52019-10-26 07:20:21 -06003284 if (!table->files[index])
Jens Axboe6b063142019-01-10 22:13:58 -07003285 break;
3286 /*
3287 * Don't allow io_uring instances to be registered. If UNIX
3288 * isn't enabled, then this causes a reference cycle and this
3289 * instance can never get freed. If UNIX is enabled we'll
3290 * handle it just fine, but there's still no point in allowing
3291 * a ring fd as it doesn't support regular read/write anyway.
3292 */
Jens Axboe65e19f52019-10-26 07:20:21 -06003293 if (table->files[index]->f_op == &io_uring_fops) {
3294 fput(table->files[index]);
Jens Axboe6b063142019-01-10 22:13:58 -07003295 break;
3296 }
Jens Axboe6b063142019-01-10 22:13:58 -07003297 ret = 0;
3298 }
3299
3300 if (ret) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003301 for (i = 0; i < ctx->nr_user_files; i++) {
3302 struct file *file;
Jens Axboe6b063142019-01-10 22:13:58 -07003303
Jens Axboe65e19f52019-10-26 07:20:21 -06003304 file = io_file_from_index(ctx, i);
3305 if (file)
3306 fput(file);
3307 }
3308 for (i = 0; i < nr_tables; i++)
3309 kfree(ctx->file_table[i].files);
3310
3311 kfree(ctx->file_table);
3312 ctx->file_table = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07003313 ctx->nr_user_files = 0;
3314 return ret;
3315 }
3316
3317 ret = io_sqe_files_scm(ctx);
3318 if (ret)
3319 io_sqe_files_unregister(ctx);
3320
3321 return ret;
3322}
3323
Jens Axboec3a31e62019-10-03 13:59:56 -06003324static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
3325{
3326#if defined(CONFIG_UNIX)
Jens Axboe65e19f52019-10-26 07:20:21 -06003327 struct file *file = io_file_from_index(ctx, index);
Jens Axboec3a31e62019-10-03 13:59:56 -06003328 struct sock *sock = ctx->ring_sock->sk;
3329 struct sk_buff_head list, *head = &sock->sk_receive_queue;
3330 struct sk_buff *skb;
3331 int i;
3332
3333 __skb_queue_head_init(&list);
3334
3335 /*
3336 * Find the skb that holds this file in its SCM_RIGHTS. When found,
3337 * remove this entry and rearrange the file array.
3338 */
3339 skb = skb_dequeue(head);
3340 while (skb) {
3341 struct scm_fp_list *fp;
3342
3343 fp = UNIXCB(skb).fp;
3344 for (i = 0; i < fp->count; i++) {
3345 int left;
3346
3347 if (fp->fp[i] != file)
3348 continue;
3349
3350 unix_notinflight(fp->user, fp->fp[i]);
3351 left = fp->count - 1 - i;
3352 if (left) {
3353 memmove(&fp->fp[i], &fp->fp[i + 1],
3354 left * sizeof(struct file *));
3355 }
3356 fp->count--;
3357 if (!fp->count) {
3358 kfree_skb(skb);
3359 skb = NULL;
3360 } else {
3361 __skb_queue_tail(&list, skb);
3362 }
3363 fput(file);
3364 file = NULL;
3365 break;
3366 }
3367
3368 if (!file)
3369 break;
3370
3371 __skb_queue_tail(&list, skb);
3372
3373 skb = skb_dequeue(head);
3374 }
3375
3376 if (skb_peek(&list)) {
3377 spin_lock_irq(&head->lock);
3378 while ((skb = __skb_dequeue(&list)) != NULL)
3379 __skb_queue_tail(head, skb);
3380 spin_unlock_irq(&head->lock);
3381 }
3382#else
Jens Axboe65e19f52019-10-26 07:20:21 -06003383 fput(io_file_from_index(ctx, index));
Jens Axboec3a31e62019-10-03 13:59:56 -06003384#endif
3385}
3386
3387static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
3388 int index)
3389{
3390#if defined(CONFIG_UNIX)
3391 struct sock *sock = ctx->ring_sock->sk;
3392 struct sk_buff_head *head = &sock->sk_receive_queue;
3393 struct sk_buff *skb;
3394
3395 /*
3396 * See if we can merge this file into an existing skb SCM_RIGHTS
3397 * file set. If there's no room, fall back to allocating a new skb
3398 * and filling it in.
3399 */
3400 spin_lock_irq(&head->lock);
3401 skb = skb_peek(head);
3402 if (skb) {
3403 struct scm_fp_list *fpl = UNIXCB(skb).fp;
3404
3405 if (fpl->count < SCM_MAX_FD) {
3406 __skb_unlink(skb, head);
3407 spin_unlock_irq(&head->lock);
3408 fpl->fp[fpl->count] = get_file(file);
3409 unix_inflight(fpl->user, fpl->fp[fpl->count]);
3410 fpl->count++;
3411 spin_lock_irq(&head->lock);
3412 __skb_queue_head(head, skb);
3413 } else {
3414 skb = NULL;
3415 }
3416 }
3417 spin_unlock_irq(&head->lock);
3418
3419 if (skb) {
3420 fput(file);
3421 return 0;
3422 }
3423
3424 return __io_sqe_files_scm(ctx, 1, index);
3425#else
3426 return 0;
3427#endif
3428}
3429
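/*
 * IORING_REGISTER_FILES_UPDATE: swap entries of an existing fixed file
 * set in place, starting at up.offset. An fd of -1 simply unregisters
 * that slot. Returns the number of slots processed, or an error if no
 * progress was made.
 */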
3430static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
3431 unsigned nr_args)
3432{
3433 struct io_uring_files_update up;
3434 __s32 __user *fds;
3435 int fd, i, err;
3436 __u32 done;
3437
Jens Axboe65e19f52019-10-26 07:20:21 -06003438 if (!ctx->file_table)
Jens Axboec3a31e62019-10-03 13:59:56 -06003439 return -ENXIO;
3440 if (!nr_args)
3441 return -EINVAL;
3442 if (copy_from_user(&up, arg, sizeof(up)))
3443 return -EFAULT;
3444 if (check_add_overflow(up.offset, nr_args, &done))
3445 return -EOVERFLOW;
3446 if (done > ctx->nr_user_files)
3447 return -EINVAL;
3448
3449 done = 0;
3450 fds = (__s32 __user *) up.fds;
3451 while (nr_args) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003452 struct fixed_file_table *table;
3453 unsigned index;
3454
Jens Axboec3a31e62019-10-03 13:59:56 -06003455 err = 0;
3456 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
3457 err = -EFAULT;
3458 break;
3459 }
3460 i = array_index_nospec(up.offset, ctx->nr_user_files);
Jens Axboe65e19f52019-10-26 07:20:21 -06003461 table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
3462 index = i & IORING_FILE_TABLE_MASK;
3463 if (table->files[index]) {
Jens Axboec3a31e62019-10-03 13:59:56 -06003464 io_sqe_file_unregister(ctx, i);
Jens Axboe65e19f52019-10-26 07:20:21 -06003465 table->files[index] = NULL;
Jens Axboec3a31e62019-10-03 13:59:56 -06003466 }
3467 if (fd != -1) {
3468 struct file *file;
3469
3470 file = fget(fd);
3471 if (!file) {
3472 err = -EBADF;
3473 break;
3474 }
3475 /*
3476 * Don't allow io_uring instances to be registered. If
3477 * UNIX isn't enabled, then this causes a reference
3478 * cycle and this instance can never get freed. If UNIX
3479 * is enabled we'll handle it just fine, but there's
3480 * still no point in allowing a ring fd as it doesn't
3481 * support regular read/write anyway.
3482 */
3483 if (file->f_op == &io_uring_fops) {
3484 fput(file);
3485 err = -EBADF;
3486 break;
3487 }
Jens Axboe65e19f52019-10-26 07:20:21 -06003488 table->files[index] = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06003489 err = io_sqe_file_register(ctx, file, i);
3490 if (err)
3491 break;
3492 }
3493 nr_args--;
3494 done++;
3495 up.offset++;
3496 }
3497
3498 return done ? done : err;
3499}
3500
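/*
 * Start the async offload side of the ring: the optional SQPOLL thread
 * (CAP_SYS_ADMIN only, optionally pinned via IORING_SETUP_SQ_AFF) plus
 * the io-wq worker pool, sized to min(sq_entries, 4 * online CPUs).
 */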
Jens Axboe6c271ce2019-01-10 11:22:30 -07003501static int io_sq_offload_start(struct io_ring_ctx *ctx,
3502 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003503{
Jens Axboe561fb042019-10-24 07:25:42 -06003504 unsigned concurrency;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003505 int ret;
3506
Jens Axboe6c271ce2019-01-10 11:22:30 -07003507 init_waitqueue_head(&ctx->sqo_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003508 mmgrab(current->mm);
3509 ctx->sqo_mm = current->mm;
3510
Jens Axboe6c271ce2019-01-10 11:22:30 -07003511 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe3ec482d2019-04-08 10:51:01 -06003512 ret = -EPERM;
3513 if (!capable(CAP_SYS_ADMIN))
3514 goto err;
3515
Jens Axboe917257d2019-04-13 09:28:55 -06003516 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3517 if (!ctx->sq_thread_idle)
3518 ctx->sq_thread_idle = HZ;
3519
Jens Axboe6c271ce2019-01-10 11:22:30 -07003520 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06003521 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003522
Jens Axboe917257d2019-04-13 09:28:55 -06003523 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06003524 if (cpu >= nr_cpu_ids)
3525 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08003526 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06003527 goto err;
3528
Jens Axboe6c271ce2019-01-10 11:22:30 -07003529 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3530 ctx, cpu,
3531 "io_uring-sq");
3532 } else {
3533 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3534 "io_uring-sq");
3535 }
3536 if (IS_ERR(ctx->sqo_thread)) {
3537 ret = PTR_ERR(ctx->sqo_thread);
3538 ctx->sqo_thread = NULL;
3539 goto err;
3540 }
3541 wake_up_process(ctx->sqo_thread);
3542 } else if (p->flags & IORING_SETUP_SQ_AFF) {
3543 /* Can't have SQ_AFF without SQPOLL */
3544 ret = -EINVAL;
3545 goto err;
3546 }
3547
Jens Axboe561fb042019-10-24 07:25:42 -06003548 /* Do QD, or 4 * CPUS, whatever is smallest */
3549 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
3550 ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm);
Jens Axboe975c99a52019-10-30 08:42:56 -06003551 if (IS_ERR(ctx->io_wq)) {
3552 ret = PTR_ERR(ctx->io_wq);
3553 ctx->io_wq = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003554 goto err;
3555 }
3556
3557 return 0;
3558err:
Jens Axboe54a91f32019-09-10 09:15:04 -06003559 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003560 mmdrop(ctx->sqo_mm);
3561 ctx->sqo_mm = NULL;
3562 return ret;
3563}
3564
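/*
 * Ring and registered-buffer memory is charged to RLIMIT_MEMLOCK for
 * users lacking CAP_IPC_LOCK; these helpers adjust user->locked_vm.
 */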
3565static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3566{
3567 atomic_long_sub(nr_pages, &user->locked_vm);
3568}
3569
3570static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3571{
3572 unsigned long page_limit, cur_pages, new_pages;
3573
3574 /* Don't allow more pages than we can safely lock */
3575 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3576
3577 do {
3578 cur_pages = atomic_long_read(&user->locked_vm);
3579 new_pages = cur_pages + nr_pages;
3580 if (new_pages > page_limit)
3581 return -ENOMEM;
3582 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3583 new_pages) != cur_pages);
3584
3585 return 0;
3586}
3587
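/*
 * Ring memory is allocated as compound pages (__GFP_COMP), so the whole
 * allocation can be handed to userspace via mmap and released with
 * put_page_testzero() on the head page.
 */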
3588static void io_mem_free(void *ptr)
3589{
Mark Rutland52e04ef2019-04-30 17:30:21 +01003590 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003591
Mark Rutland52e04ef2019-04-30 17:30:21 +01003592 if (!ptr)
3593 return;
3594
3595 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003596 if (put_page_testzero(page))
3597 free_compound_page(page);
3598}
3599
3600static void *io_mem_alloc(size_t size)
3601{
3602 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3603 __GFP_NORETRY;
3604
3605 return (void *) __get_free_pages(gfp_flags, get_order(size));
3606}
3607
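/*
 * Compute the size of the combined rings allocation: struct io_rings
 * including the CQE array, aligned to a cacheline, followed by the SQ
 * index array (whose offset is returned via *sq_offset).
 */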
Hristo Venev75b28af2019-08-26 17:23:46 +00003608static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3609 size_t *sq_offset)
3610{
3611 struct io_rings *rings;
3612 size_t off, sq_array_size;
3613
3614 off = struct_size(rings, cqes, cq_entries);
3615 if (off == SIZE_MAX)
3616 return SIZE_MAX;
3617
3618#ifdef CONFIG_SMP
3619 off = ALIGN(off, SMP_CACHE_BYTES);
3620 if (off == 0)
3621 return SIZE_MAX;
3622#endif
3623
3624 sq_array_size = array_size(sizeof(u32), sq_entries);
3625 if (sq_array_size == SIZE_MAX)
3626 return SIZE_MAX;
3627
3628 if (check_add_overflow(off, sq_array_size, &off))
3629 return SIZE_MAX;
3630
3631 if (sq_offset)
3632 *sq_offset = off;
3633
3634 return off;
3635}
3636
Jens Axboe2b188cc2019-01-07 10:46:33 -07003637static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3638{
Hristo Venev75b28af2019-08-26 17:23:46 +00003639 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003640
Hristo Venev75b28af2019-08-26 17:23:46 +00003641 pages = (size_t)1 << get_order(
3642 rings_size(sq_entries, cq_entries, NULL));
3643 pages += (size_t)1 << get_order(
3644 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07003645
Hristo Venev75b28af2019-08-26 17:23:46 +00003646 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003647}
3648
Jens Axboeedafcce2019-01-09 09:16:05 -07003649static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3650{
3651 int i, j;
3652
3653 if (!ctx->user_bufs)
3654 return -ENXIO;
3655
3656 for (i = 0; i < ctx->nr_user_bufs; i++) {
3657 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3658
3659 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbard27c4d3a2019-08-04 19:32:06 -07003660 put_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07003661
3662 if (ctx->account_mem)
3663 io_unaccount_mem(ctx->user, imu->nr_bvecs);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003664 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003665 imu->nr_bvecs = 0;
3666 }
3667
3668 kfree(ctx->user_bufs);
3669 ctx->user_bufs = NULL;
3670 ctx->nr_user_bufs = 0;
3671 return 0;
3672}
3673
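/* Copy a single iovec from userspace, honouring the compat ABI layout. */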
3674static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3675 void __user *arg, unsigned index)
3676{
3677 struct iovec __user *src;
3678
3679#ifdef CONFIG_COMPAT
3680 if (ctx->compat) {
3681 struct compat_iovec __user *ciovs;
3682 struct compat_iovec ciov;
3683
3684 ciovs = (struct compat_iovec __user *) arg;
3685 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3686 return -EFAULT;
3687
3688 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3689 dst->iov_len = ciov.iov_len;
3690 return 0;
3691 }
3692#endif
3693 src = (struct iovec __user *) arg;
3694 if (copy_from_user(dst, &src[index], sizeof(*dst)))
3695 return -EFAULT;
3696 return 0;
3697}
3698
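/*
 * Register fixed buffers: each iovec (capped at 1GB) is pinned with
 * FOLL_WRITE | FOLL_LONGTERM and recorded as a bvec array. File-backed
 * mappings other than hugetlb are refused.
 */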
3699static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3700 unsigned nr_args)
3701{
3702 struct vm_area_struct **vmas = NULL;
3703 struct page **pages = NULL;
3704 int i, j, got_pages = 0;
3705 int ret = -EINVAL;
3706
3707 if (ctx->user_bufs)
3708 return -EBUSY;
3709 if (!nr_args || nr_args > UIO_MAXIOV)
3710 return -EINVAL;
3711
3712 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3713 GFP_KERNEL);
3714 if (!ctx->user_bufs)
3715 return -ENOMEM;
3716
3717 for (i = 0; i < nr_args; i++) {
3718 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3719 unsigned long off, start, end, ubuf;
3720 int pret, nr_pages;
3721 struct iovec iov;
3722 size_t size;
3723
3724 ret = io_copy_iov(ctx, &iov, arg, i);
3725 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03003726 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07003727
3728 /*
3729 * Don't impose further limits on the size and buffer
3730 * constraints here; we'll -EINVAL later when IO is
3731 * submitted if they are wrong.
3732 */
3733 ret = -EFAULT;
3734 if (!iov.iov_base || !iov.iov_len)
3735 goto err;
3736
3737 /* arbitrary limit, but we need something */
3738 if (iov.iov_len > SZ_1G)
3739 goto err;
3740
3741 ubuf = (unsigned long) iov.iov_base;
3742 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3743 start = ubuf >> PAGE_SHIFT;
3744 nr_pages = end - start;
3745
3746 if (ctx->account_mem) {
3747 ret = io_account_mem(ctx->user, nr_pages);
3748 if (ret)
3749 goto err;
3750 }
3751
3752 ret = 0;
3753 if (!pages || nr_pages > got_pages) {
3754 kfree(vmas);
3755 kfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003756 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07003757 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003758 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07003759 sizeof(struct vm_area_struct *),
3760 GFP_KERNEL);
3761 if (!pages || !vmas) {
3762 ret = -ENOMEM;
3763 if (ctx->account_mem)
3764 io_unaccount_mem(ctx->user, nr_pages);
3765 goto err;
3766 }
3767 got_pages = nr_pages;
3768 }
3769
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003770 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07003771 GFP_KERNEL);
3772 ret = -ENOMEM;
3773 if (!imu->bvec) {
3774 if (ctx->account_mem)
3775 io_unaccount_mem(ctx->user, nr_pages);
3776 goto err;
3777 }
3778
3779 ret = 0;
3780 down_read(&current->mm->mmap_sem);
Ira Weiny932f4a62019-05-13 17:17:03 -07003781 pret = get_user_pages(ubuf, nr_pages,
3782 FOLL_WRITE | FOLL_LONGTERM,
3783 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003784 if (pret == nr_pages) {
3785 /* don't support file backed memory */
3786 for (j = 0; j < nr_pages; j++) {
3787 struct vm_area_struct *vma = vmas[j];
3788
3789 if (vma->vm_file &&
3790 !is_file_hugepages(vma->vm_file)) {
3791 ret = -EOPNOTSUPP;
3792 break;
3793 }
3794 }
3795 } else {
3796 ret = pret < 0 ? pret : -EFAULT;
3797 }
3798 up_read(&current->mm->mmap_sem);
3799 if (ret) {
3800 /*
3801 * if we did partial map, or found file backed vmas,
3802 * release any pages we did get
3803 */
John Hubbard27c4d3a2019-08-04 19:32:06 -07003804 if (pret > 0)
3805 put_user_pages(pages, pret);
Jens Axboeedafcce2019-01-09 09:16:05 -07003806 if (ctx->account_mem)
3807 io_unaccount_mem(ctx->user, nr_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003808 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003809 goto err;
3810 }
3811
3812 off = ubuf & ~PAGE_MASK;
3813 size = iov.iov_len;
3814 for (j = 0; j < nr_pages; j++) {
3815 size_t vec_len;
3816
3817 vec_len = min_t(size_t, size, PAGE_SIZE - off);
3818 imu->bvec[j].bv_page = pages[j];
3819 imu->bvec[j].bv_len = vec_len;
3820 imu->bvec[j].bv_offset = off;
3821 off = 0;
3822 size -= vec_len;
3823 }
3824 /* store original address for later verification */
3825 imu->ubuf = ubuf;
3826 imu->len = iov.iov_len;
3827 imu->nr_bvecs = nr_pages;
3828
3829 ctx->nr_user_bufs++;
3830 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003831 kvfree(pages);
3832 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003833 return 0;
3834err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003835 kvfree(pages);
3836 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003837 io_sqe_buffer_unregister(ctx);
3838 return ret;
3839}
3840
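/*
 * Register an eventfd that is signalled when completion events are
 * posted to the CQ ring; unregistering drops the context again.
 */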
Jens Axboe9b402842019-04-11 11:45:41 -06003841static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3842{
3843 __s32 __user *fds = arg;
3844 int fd;
3845
3846 if (ctx->cq_ev_fd)
3847 return -EBUSY;
3848
3849 if (copy_from_user(&fd, fds, sizeof(*fds)))
3850 return -EFAULT;
3851
3852 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3853 if (IS_ERR(ctx->cq_ev_fd)) {
3854 int ret = PTR_ERR(ctx->cq_ev_fd);
3855 ctx->cq_ev_fd = NULL;
3856 return ret;
3857 }
3858
3859 return 0;
3860}
3861
3862static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3863{
3864 if (ctx->cq_ev_fd) {
3865 eventfd_ctx_put(ctx->cq_ev_fd);
3866 ctx->cq_ev_fd = NULL;
3867 return 0;
3868 }
3869
3870 return -ENXIO;
3871}
3872
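/*
 * Final teardown once all references are gone: stop offload threads,
 * unregister buffers/files/eventfd, release the rings and return the
 * accounted memory.
 */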
Jens Axboe2b188cc2019-01-07 10:46:33 -07003873static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3874{
Jens Axboe6b063142019-01-10 22:13:58 -07003875 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003876 if (ctx->sqo_mm)
3877 mmdrop(ctx->sqo_mm);
Jens Axboedef596e2019-01-09 08:59:42 -07003878
3879 io_iopoll_reap_events(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07003880 io_sqe_buffer_unregister(ctx);
Jens Axboe6b063142019-01-10 22:13:58 -07003881 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06003882 io_eventfd_unregister(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07003883
Jens Axboe2b188cc2019-01-07 10:46:33 -07003884#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07003885 if (ctx->ring_sock) {
3886 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003887 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07003888 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003889#endif
3890
Hristo Venev75b28af2019-08-26 17:23:46 +00003891 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003892 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003893
3894 percpu_ref_exit(&ctx->refs);
3895 if (ctx->account_mem)
3896 io_unaccount_mem(ctx->user,
3897 ring_pages(ctx->sq_entries, ctx->cq_entries));
3898 free_uid(ctx->user);
3899 kfree(ctx);
3900}
3901
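/*
 * poll() on the ring fd itself: EPOLLOUT while there is SQ space left,
 * EPOLLIN once there are CQEs ready to be reaped.
 */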
3902static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3903{
3904 struct io_ring_ctx *ctx = file->private_data;
3905 __poll_t mask = 0;
3906
3907 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02003908 /*
3909 * synchronizes with the barrier from the wq_has_sleeper call in
3910 * io_commit_cqring
3911 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003912 smp_rmb();
Hristo Venev75b28af2019-08-26 17:23:46 +00003913 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
3914 ctx->rings->sq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003915 mask |= EPOLLOUT | EPOLLWRNORM;
yangerkundaa5de52019-09-24 20:53:34 +08003916 if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003917 mask |= EPOLLIN | EPOLLRDNORM;
3918
3919 return mask;
3920}
3921
3922static int io_uring_fasync(int fd, struct file *file, int on)
3923{
3924 struct io_ring_ctx *ctx = file->private_data;
3925
3926 return fasync_helper(fd, file, on, &ctx->cq_fasync);
3927}
3928
3929static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3930{
3931 mutex_lock(&ctx->uring_lock);
3932 percpu_ref_kill(&ctx->refs);
3933 mutex_unlock(&ctx->uring_lock);
3934
Jens Axboe5262f562019-09-17 12:26:57 -06003935 io_kill_timeouts(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003936 io_poll_remove_all(ctx);
Jens Axboe561fb042019-10-24 07:25:42 -06003937
3938 if (ctx->io_wq)
3939 io_wq_cancel_all(ctx->io_wq);
3940
Jens Axboedef596e2019-01-09 08:59:42 -07003941 io_iopoll_reap_events(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003942 wait_for_completion(&ctx->ctx_done);
3943 io_ring_ctx_free(ctx);
3944}
3945
3946static int io_uring_release(struct inode *inode, struct file *file)
3947{
3948 struct io_ring_ctx *ctx = file->private_data;
3949
3950 file->private_data = NULL;
3951 io_ring_ctx_wait_and_kill(ctx);
3952 return 0;
3953}
3954
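/*
 * Cancel inflight requests that reference the given files_struct; used
 * from ->flush() so an exiting task cannot leave requests pinning its
 * file table.
 */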
Jens Axboefcb323c2019-10-24 12:39:47 -06003955static void io_uring_cancel_files(struct io_ring_ctx *ctx,
3956 struct files_struct *files)
3957{
3958 struct io_kiocb *req;
3959 DEFINE_WAIT(wait);
3960
3961 while (!list_empty_careful(&ctx->inflight_list)) {
3962 enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
3963
3964 spin_lock_irq(&ctx->inflight_lock);
3965 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
3966 if (req->work.files == files) {
3967 ret = io_wq_cancel_work(ctx->io_wq, &req->work);
3968 break;
3969 }
3970 }
3971 if (ret == IO_WQ_CANCEL_RUNNING)
3972 prepare_to_wait(&ctx->inflight_wait, &wait,
3973 TASK_UNINTERRUPTIBLE);
3974
3975 spin_unlock_irq(&ctx->inflight_lock);
3976
3977 /*
3978 * We need to keep going until we get NOTFOUND. We only cancel
3979 * one work item at a time.
3980 *
3981 * If we get CANCEL_RUNNING, then wait for the work item to
3982 * complete before continuing.
3983 */
3984 if (ret == IO_WQ_CANCEL_OK)
3985 continue;
3986 else if (ret != IO_WQ_CANCEL_RUNNING)
3987 break;
3988 schedule();
3989 }
3990}
3991
3992static int io_uring_flush(struct file *file, void *data)
3993{
3994 struct io_ring_ctx *ctx = file->private_data;
3995
3996 io_uring_cancel_files(ctx, data);
3997 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
3998 io_wq_cancel_all(ctx->io_wq);
3999 return 0;
4000}
4001
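/*
 * mmap() of the ring fd: vm_pgoff selects the region, with the SQ and
 * CQ rings sharing a single allocation and the SQE array mapped
 * separately via IORING_OFF_SQES.
 */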
Jens Axboe2b188cc2019-01-07 10:46:33 -07004002static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
4003{
4004 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
4005 unsigned long sz = vma->vm_end - vma->vm_start;
4006 struct io_ring_ctx *ctx = file->private_data;
4007 unsigned long pfn;
4008 struct page *page;
4009 void *ptr;
4010
4011 switch (offset) {
4012 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00004013 case IORING_OFF_CQ_RING:
4014 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004015 break;
4016 case IORING_OFF_SQES:
4017 ptr = ctx->sq_sqes;
4018 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004019 default:
4020 return -EINVAL;
4021 }
4022
4023 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07004024 if (sz > page_size(page))
Jens Axboe2b188cc2019-01-07 10:46:33 -07004025 return -EINVAL;
4026
4027 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
4028 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
4029}
4030
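/*
 * io_uring_enter() submits SQEs, waits for completions, or both,
 * depending on flags. A typical blocking call from userspace, using the
 * raw syscall rather than liburing (sketch only), might be:
 *
 *	io_uring_enter(ring_fd, n, 1, IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * For SQPOLL rings, submission itself is left to the kernel thread;
 * this call only wakes it (IORING_ENTER_SQ_WAKEUP) and/or waits for
 * completions.
 */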
4031SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
4032 u32, min_complete, u32, flags, const sigset_t __user *, sig,
4033 size_t, sigsz)
4034{
4035 struct io_ring_ctx *ctx;
4036 long ret = -EBADF;
4037 int submitted = 0;
4038 struct fd f;
4039
Jens Axboe6c271ce2019-01-10 11:22:30 -07004040 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
Jens Axboe2b188cc2019-01-07 10:46:33 -07004041 return -EINVAL;
4042
4043 f = fdget(fd);
4044 if (!f.file)
4045 return -EBADF;
4046
4047 ret = -EOPNOTSUPP;
4048 if (f.file->f_op != &io_uring_fops)
4049 goto out_fput;
4050
4051 ret = -ENXIO;
4052 ctx = f.file->private_data;
4053 if (!percpu_ref_tryget(&ctx->refs))
4054 goto out_fput;
4055
Jens Axboe6c271ce2019-01-10 11:22:30 -07004056 /*
4057 * For SQ polling, the thread will do all submissions and completions.
4058 * Just return the requested submit count, and wake the thread if
4059 * we were asked to.
4060 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06004061 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07004062 if (ctx->flags & IORING_SETUP_SQPOLL) {
4063 if (flags & IORING_ENTER_SQ_WAKEUP)
4064 wake_up(&ctx->sqo_wait);
4065 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06004066 } else if (to_submit) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07004067 to_submit = min(to_submit, ctx->sq_entries);
4068
4069 mutex_lock(&ctx->uring_lock);
Jens Axboefcb323c2019-10-24 12:39:47 -06004070 submitted = io_ring_submit(ctx, to_submit, f.file, fd);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004071 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004072 }
4073 if (flags & IORING_ENTER_GETEVENTS) {
Jens Axboedef596e2019-01-09 08:59:42 -07004074 unsigned nr_events = 0;
4075
Jens Axboe2b188cc2019-01-07 10:46:33 -07004076 min_complete = min(min_complete, ctx->cq_entries);
4077
Jens Axboedef596e2019-01-09 08:59:42 -07004078 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07004079 ret = io_iopoll_check(ctx, &nr_events, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07004080 } else {
4081 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
4082 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07004083 }
4084
Pavel Begunkov6805b322019-10-08 02:18:42 +03004085 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004086out_fput:
4087 fdput(f);
4088 return submitted ? submitted : ret;
4089}
4090
4091static const struct file_operations io_uring_fops = {
4092 .release = io_uring_release,
Jens Axboefcb323c2019-10-24 12:39:47 -06004093 .flush = io_uring_flush,
Jens Axboe2b188cc2019-01-07 10:46:33 -07004094 .mmap = io_uring_mmap,
4095 .poll = io_uring_poll,
4096 .fasync = io_uring_fasync,
4097};
4098
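/*
 * Allocate the shared rings and the SQE array, mirroring the ring
 * sizes and masks into the ctx for cheap access at submission time.
 */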
4099static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
4100 struct io_uring_params *p)
4101{
Hristo Venev75b28af2019-08-26 17:23:46 +00004102 struct io_rings *rings;
4103 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004104
Hristo Venev75b28af2019-08-26 17:23:46 +00004105 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
4106 if (size == SIZE_MAX)
4107 return -EOVERFLOW;
4108
4109 rings = io_mem_alloc(size);
4110 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004111 return -ENOMEM;
4112
Hristo Venev75b28af2019-08-26 17:23:46 +00004113 ctx->rings = rings;
4114 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
4115 rings->sq_ring_mask = p->sq_entries - 1;
4116 rings->cq_ring_mask = p->cq_entries - 1;
4117 rings->sq_ring_entries = p->sq_entries;
4118 rings->cq_ring_entries = p->cq_entries;
4119 ctx->sq_mask = rings->sq_ring_mask;
4120 ctx->cq_mask = rings->cq_ring_mask;
4121 ctx->sq_entries = rings->sq_ring_entries;
4122 ctx->cq_entries = rings->cq_ring_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004123
4124 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
4125 if (size == SIZE_MAX)
4126 return -EOVERFLOW;
4127
4128 ctx->sq_sqes = io_mem_alloc(size);
Mark Rutland52e04ef2019-04-30 17:30:21 +01004129 if (!ctx->sq_sqes)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004130 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004131
Jens Axboe2b188cc2019-01-07 10:46:33 -07004132 return 0;
4133}
4134
4135/*
4136 * Allocate an anonymous fd; this is what constitutes the application
4137 * visible backing of an io_uring instance. The application mmaps this
4138 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
4139 * we have to tie this fd to a socket for file garbage collection purposes.
4140 */
4141static int io_uring_get_fd(struct io_ring_ctx *ctx)
4142{
4143 struct file *file;
4144 int ret;
4145
4146#if defined(CONFIG_UNIX)
4147 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
4148 &ctx->ring_sock);
4149 if (ret)
4150 return ret;
4151#endif
4152
4153 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
4154 if (ret < 0)
4155 goto err;
4156
4157 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
4158 O_RDWR | O_CLOEXEC);
4159 if (IS_ERR(file)) {
4160 put_unused_fd(ret);
4161 ret = PTR_ERR(file);
4162 goto err;
4163 }
4164
4165#if defined(CONFIG_UNIX)
4166 ctx->ring_sock->file = file;
Jens Axboe6b063142019-01-10 22:13:58 -07004167 ctx->ring_sock->sk->sk_user_data = ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004168#endif
4169 fd_install(ret, file);
4170 return ret;
4171err:
4172#if defined(CONFIG_UNIX)
4173 sock_release(ctx->ring_sock);
4174 ctx->ring_sock = NULL;
4175#endif
4176 return ret;
4177}
4178
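/*
 * Heart of io_uring_setup(): round the ring sizes, account the memory,
 * allocate the ctx and rings, start SQ offload, and fill in the mmap
 * offsets for userspace. The fd is installed last so a concurrent close
 * cannot be observed mid-setup.
 */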
4179static int io_uring_create(unsigned entries, struct io_uring_params *p)
4180{
4181 struct user_struct *user = NULL;
4182 struct io_ring_ctx *ctx;
4183 bool account_mem;
4184 int ret;
4185
4186 if (!entries || entries > IORING_MAX_ENTRIES)
4187 return -EINVAL;
4188
4189 /*
4190 * Use twice as many entries for the CQ ring. It's possible for the
4191 * application to drive a higher depth than the size of the SQ ring,
4192 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06004193 * some flexibility in overcommitting. If the application has
4194 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
4195 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07004196 */
4197 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06004198 if (p->flags & IORING_SETUP_CQSIZE) {
4199 /*
4200 * If IORING_SETUP_CQSIZE is set, we do the same roundup
4201 * to a power-of-two, if it isn't already. We do NOT impose
4202 * any cq vs sq ring sizing.
4203 */
4204 if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
4205 return -EINVAL;
4206 p->cq_entries = roundup_pow_of_two(p->cq_entries);
4207 } else {
4208 p->cq_entries = 2 * p->sq_entries;
4209 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07004210
4211 user = get_uid(current_user());
4212 account_mem = !capable(CAP_IPC_LOCK);
4213
4214 if (account_mem) {
4215 ret = io_account_mem(user,
4216 ring_pages(p->sq_entries, p->cq_entries));
4217 if (ret) {
4218 free_uid(user);
4219 return ret;
4220 }
4221 }
4222
4223 ctx = io_ring_ctx_alloc(p);
4224 if (!ctx) {
4225 if (account_mem)
4226 io_unaccount_mem(user, ring_pages(p->sq_entries,
4227 p->cq_entries));
4228 free_uid(user);
4229 return -ENOMEM;
4230 }
4231 ctx->compat = in_compat_syscall();
4232 ctx->account_mem = account_mem;
4233 ctx->user = user;
4234
4235 ret = io_allocate_scq_urings(ctx, p);
4236 if (ret)
4237 goto err;
4238
Jens Axboe6c271ce2019-01-10 11:22:30 -07004239 ret = io_sq_offload_start(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004240 if (ret)
4241 goto err;
4242
Jens Axboe2b188cc2019-01-07 10:46:33 -07004243 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00004244 p->sq_off.head = offsetof(struct io_rings, sq.head);
4245 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
4246 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
4247 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
4248 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
4249 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
4250 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004251
4252 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00004253 p->cq_off.head = offsetof(struct io_rings, cq.head);
4254 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
4255 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
4256 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
4257 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
4258 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Jens Axboeac90f242019-09-06 10:26:21 -06004259
Jens Axboe044c1ab2019-10-28 09:15:33 -06004260 /*
4261 * Install the ring fd as the very last thing, so we don't risk someone
4262 * having closed it before we finish setup
4263 */
4264 ret = io_uring_get_fd(ctx);
4265 if (ret < 0)
4266 goto err;
4267
Jens Axboeac90f242019-09-06 10:26:21 -06004268 p->features = IORING_FEAT_SINGLE_MMAP;
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02004269 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004270 return ret;
4271err:
4272 io_ring_ctx_wait_and_kill(ctx);
4273 return ret;
4274}
4275
4276/*
4277 * Sets up an io_uring context, and returns the fd. The application asks for a
4278 * ring size, and we return the actual sq/cq ring sizes (among other things) in the
4279 * params structure passed in.
4280 */
4281static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
4282{
4283 struct io_uring_params p;
4284 long ret;
4285 int i;
4286
4287 if (copy_from_user(&p, params, sizeof(p)))
4288 return -EFAULT;
4289 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
4290 if (p.resv[i])
4291 return -EINVAL;
4292 }
4293
Jens Axboe6c271ce2019-01-10 11:22:30 -07004294 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe33a107f2019-10-04 12:10:03 -06004295 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
Jens Axboe2b188cc2019-01-07 10:46:33 -07004296 return -EINVAL;
4297
4298 ret = io_uring_create(entries, &p);
4299 if (ret < 0)
4300 return ret;
4301
4302 if (copy_to_user(params, &p, sizeof(p)))
4303 return -EFAULT;
4304
4305 return ret;
4306}
4307
4308SYSCALL_DEFINE2(io_uring_setup, u32, entries,
4309 struct io_uring_params __user *, params)
4310{
4311 return io_uring_setup(entries, params);
4312}
4313
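/*
 * Called with the uring_lock held. All registration opcodes need the
 * ring quiesced, so the percpu ref is killed and drained (dropping the
 * mutex to avoid deadlocking io_uring_enter()) before the opcode runs,
 * and revived afterwards.
 */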
Jens Axboeedafcce2019-01-09 09:16:05 -07004314static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
4315 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06004316 __releases(ctx->uring_lock)
4317 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07004318{
4319 int ret;
4320
Jens Axboe35fa71a2019-04-22 10:23:23 -06004321 /*
4322 * We're inside the ring mutex, if the ref is already dying, then
4323 * someone else killed the ctx or is already going through
4324 * io_uring_register().
4325 */
4326 if (percpu_ref_is_dying(&ctx->refs))
4327 return -ENXIO;
4328
Jens Axboeedafcce2019-01-09 09:16:05 -07004329 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06004330
4331 /*
4332 * Drop uring mutex before waiting for references to exit. If another
4333 * thread is currently inside io_uring_enter() it might need to grab
4334 * the uring_lock to make progress. If we hold it here across the drain
4335 * wait, then we can deadlock. It's safe to drop the mutex here, since
4336 * no new references will come in after we've killed the percpu ref.
4337 */
4338 mutex_unlock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07004339 wait_for_completion(&ctx->ctx_done);
Jens Axboeb19062a2019-04-15 10:49:38 -06004340 mutex_lock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07004341
4342 switch (opcode) {
4343 case IORING_REGISTER_BUFFERS:
4344 ret = io_sqe_buffer_register(ctx, arg, nr_args);
4345 break;
4346 case IORING_UNREGISTER_BUFFERS:
4347 ret = -EINVAL;
4348 if (arg || nr_args)
4349 break;
4350 ret = io_sqe_buffer_unregister(ctx);
4351 break;
Jens Axboe6b063142019-01-10 22:13:58 -07004352 case IORING_REGISTER_FILES:
4353 ret = io_sqe_files_register(ctx, arg, nr_args);
4354 break;
4355 case IORING_UNREGISTER_FILES:
4356 ret = -EINVAL;
4357 if (arg || nr_args)
4358 break;
4359 ret = io_sqe_files_unregister(ctx);
4360 break;
Jens Axboec3a31e62019-10-03 13:59:56 -06004361 case IORING_REGISTER_FILES_UPDATE:
4362 ret = io_sqe_files_update(ctx, arg, nr_args);
4363 break;
Jens Axboe9b402842019-04-11 11:45:41 -06004364 case IORING_REGISTER_EVENTFD:
4365 ret = -EINVAL;
4366 if (nr_args != 1)
4367 break;
4368 ret = io_eventfd_register(ctx, arg);
4369 break;
4370 case IORING_UNREGISTER_EVENTFD:
4371 ret = -EINVAL;
4372 if (arg || nr_args)
4373 break;
4374 ret = io_eventfd_unregister(ctx);
4375 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07004376 default:
4377 ret = -EINVAL;
4378 break;
4379 }
4380
4381 /* bring the ctx back to life */
4382 reinit_completion(&ctx->ctx_done);
4383 percpu_ref_reinit(&ctx->refs);
4384 return ret;
4385}
4386
4387SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4388 void __user *, arg, unsigned int, nr_args)
4389{
4390 struct io_ring_ctx *ctx;
4391 long ret = -EBADF;
4392 struct fd f;
4393
4394 f = fdget(fd);
4395 if (!f.file)
4396 return -EBADF;
4397
4398 ret = -EOPNOTSUPP;
4399 if (f.file->f_op != &io_uring_fops)
4400 goto out_fput;
4401
4402 ctx = f.file->private_data;
4403
4404 mutex_lock(&ctx->uring_lock);
4405 ret = __io_uring_register(ctx, opcode, arg, nr_args);
4406 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02004407 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
4408 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07004409out_fput:
4410 fdput(f);
4411 return ret;
4412}
4413
Jens Axboe2b188cc2019-01-07 10:46:33 -07004414static int __init io_uring_init(void)
4415{
4416 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4417 return 0;
4418};
4419__initcall(io_uring_init);