blob: d2cb277da2f44e94d73a13beab112d3b42e92fb0 [file] [log] [blame]
Jens Axboe2b188cc2019-01-07 10:46:33 -07001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
Stefan Bühler1e84b972019-04-24 23:54:16 +02007 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
Jens Axboe2b188cc2019-01-07 10:46:33 -070029 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
Christoph Hellwigc992fe22019-01-11 09:43:02 -070040 * Copyright (c) 2018-2019 Christoph Hellwig
Jens Axboe2b188cc2019-01-07 10:46:33 -070041 */
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <linux/refcount.h>
48#include <linux/uio.h>
49
50#include <linux/sched/signal.h>
51#include <linux/fs.h>
52#include <linux/file.h>
53#include <linux/fdtable.h>
54#include <linux/mm.h>
55#include <linux/mman.h>
56#include <linux/mmu_context.h>
57#include <linux/percpu.h>
58#include <linux/slab.h>
59#include <linux/workqueue.h>
Jens Axboe6c271ce2019-01-10 11:22:30 -070060#include <linux/kthread.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070061#include <linux/blkdev.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070062#include <linux/bvec.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070063#include <linux/net.h>
64#include <net/sock.h>
65#include <net/af_unix.h>
Jens Axboe6b063142019-01-10 22:13:58 -070066#include <net/scm.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070067#include <linux/anon_inodes.h>
68#include <linux/sched/mm.h>
69#include <linux/uaccess.h>
70#include <linux/nospec.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070071#include <linux/sizes.h>
72#include <linux/hugetlb.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070073
74#include <uapi/linux/io_uring.h>
75
76#include "internal.h"
77
Daniel Xu5277dea2019-09-14 14:23:45 -070078#define IORING_MAX_ENTRIES 32768
Jens Axboe6b063142019-01-10 22:13:58 -070079#define IORING_MAX_FIXED_FILES 1024
Jens Axboe2b188cc2019-01-07 10:46:33 -070080
81struct io_uring {
82 u32 head ____cacheline_aligned_in_smp;
83 u32 tail ____cacheline_aligned_in_smp;
84};
85
Stefan Bühler1e84b972019-04-24 23:54:16 +020086/*
Hristo Venev75b28af2019-08-26 17:23:46 +000087 * This data is shared with the application through the mmap at offsets
88 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
Stefan Bühler1e84b972019-04-24 23:54:16 +020089 *
90 * The offsets to the member fields are published through struct
91 * io_sqring_offsets when calling io_uring_setup.
92 */
Hristo Venev75b28af2019-08-26 17:23:46 +000093struct io_rings {
Stefan Bühler1e84b972019-04-24 23:54:16 +020094 /*
95 * Head and tail offsets into the ring; the offsets need to be
96 * masked to get valid indices.
97 *
Hristo Venev75b28af2019-08-26 17:23:46 +000098 * The kernel controls head of the sq ring and the tail of the cq ring,
99 * and the application controls tail of the sq ring and the head of the
100 * cq ring.
Stefan Bühler1e84b972019-04-24 23:54:16 +0200101 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000102 struct io_uring sq, cq;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200103 /*
Hristo Venev75b28af2019-08-26 17:23:46 +0000104 * Bitmasks to apply to head and tail offsets (constant, equals
Stefan Bühler1e84b972019-04-24 23:54:16 +0200105 * ring_entries - 1)
106 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000107 u32 sq_ring_mask, cq_ring_mask;
108 /* Ring sizes (constant, power of 2) */
109 u32 sq_ring_entries, cq_ring_entries;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200110 /*
111 * Number of invalid entries dropped by the kernel due to
112 * invalid index stored in array
113 *
114 * Written by the kernel, shouldn't be modified by the
115 * application (i.e. get number of "new events" by comparing to
116 * cached value).
117 *
118 * After a new SQ head value was read by the application this
119 * counter includes all submissions that were dropped reaching
120 * the new SQ head (and possibly more).
121 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000122 u32 sq_dropped;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200123 /*
124 * Runtime flags
125 *
126 * Written by the kernel, shouldn't be modified by the
127 * application.
128 *
129 * The application needs a full memory barrier before checking
130 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
131 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000132 u32 sq_flags;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200133 /*
134 * Number of completion events lost because the queue was full;
135 * this should be avoided by the application by making sure
136 * there are not more requests pending thatn there is space in
137 * the completion queue.
138 *
139 * Written by the kernel, shouldn't be modified by the
140 * application (i.e. get number of "new events" by comparing to
141 * cached value).
142 *
143 * As completion events come in out of order this counter is not
144 * ordered with any other data.
145 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000146 u32 cq_overflow;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200147 /*
148 * Ring buffer of completion events.
149 *
150 * The kernel writes completion events fresh every time they are
151 * produced, so the application is allowed to modify pending
152 * entries.
153 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000154 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700155};
156
Jens Axboeedafcce2019-01-09 09:16:05 -0700157struct io_mapped_ubuf {
158 u64 ubuf;
159 size_t len;
160 struct bio_vec *bvec;
161 unsigned int nr_bvecs;
162};
163
Jens Axboe31b51512019-01-18 22:56:34 -0700164struct async_list {
165 spinlock_t lock;
166 atomic_t cnt;
167 struct list_head list;
168
169 struct file *file;
Jens Axboe6d5d5ac2019-09-11 10:16:13 -0600170 off_t io_start;
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +0800171 size_t io_len;
Jens Axboe31b51512019-01-18 22:56:34 -0700172};
173
Jens Axboe2b188cc2019-01-07 10:46:33 -0700174struct io_ring_ctx {
175 struct {
176 struct percpu_ref refs;
177 } ____cacheline_aligned_in_smp;
178
179 struct {
180 unsigned int flags;
181 bool compat;
182 bool account_mem;
183
Hristo Venev75b28af2019-08-26 17:23:46 +0000184 /*
185 * Ring buffer of indices into array of io_uring_sqe, which is
186 * mmapped by the application using the IORING_OFF_SQES offset.
187 *
188 * This indirection could e.g. be used to assign fixed
189 * io_uring_sqe entries to operations and only submit them to
190 * the queue when needed.
191 *
192 * The kernel modifies neither the indices array nor the entries
193 * array.
194 */
195 u32 *sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700196 unsigned cached_sq_head;
197 unsigned sq_entries;
198 unsigned sq_mask;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700199 unsigned sq_thread_idle;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700200 struct io_uring_sqe *sq_sqes;
Jens Axboede0617e2019-04-06 21:51:27 -0600201
202 struct list_head defer_list;
Jens Axboe5262f562019-09-17 12:26:57 -0600203 struct list_head timeout_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700204 } ____cacheline_aligned_in_smp;
205
206 /* IO offload */
Jens Axboe54a91f32019-09-10 09:15:04 -0600207 struct workqueue_struct *sqo_wq[2];
Jens Axboe6c271ce2019-01-10 11:22:30 -0700208 struct task_struct *sqo_thread; /* if using sq thread polling */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700209 struct mm_struct *sqo_mm;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700210 wait_queue_head_t sqo_wait;
Jackie Liua4c0b3d2019-07-08 13:41:12 +0800211 struct completion sqo_thread_started;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700212
213 struct {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700214 unsigned cached_cq_tail;
215 unsigned cq_entries;
216 unsigned cq_mask;
217 struct wait_queue_head cq_wait;
218 struct fasync_struct *cq_fasync;
Jens Axboe9b402842019-04-11 11:45:41 -0600219 struct eventfd_ctx *cq_ev_fd;
Jens Axboe5262f562019-09-17 12:26:57 -0600220 atomic_t cq_timeouts;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700221 } ____cacheline_aligned_in_smp;
222
Hristo Venev75b28af2019-08-26 17:23:46 +0000223 struct io_rings *rings;
224
Jens Axboe6b063142019-01-10 22:13:58 -0700225 /*
226 * If used, fixed file set. Writers must ensure that ->refs is dead,
227 * readers must ensure that ->refs is alive as long as the file* is
228 * used. Only updated through io_uring_register(2).
229 */
230 struct file **user_files;
231 unsigned nr_user_files;
232
Jens Axboeedafcce2019-01-09 09:16:05 -0700233 /* if used, fixed mapped user buffers */
234 unsigned nr_user_bufs;
235 struct io_mapped_ubuf *user_bufs;
236
Jens Axboe2b188cc2019-01-07 10:46:33 -0700237 struct user_struct *user;
238
239 struct completion ctx_done;
240
241 struct {
242 struct mutex uring_lock;
243 wait_queue_head_t wait;
244 } ____cacheline_aligned_in_smp;
245
246 struct {
247 spinlock_t completion_lock;
Jens Axboedef596e2019-01-09 08:59:42 -0700248 bool poll_multi_file;
249 /*
250 * ->poll_list is protected by the ctx->uring_lock for
251 * io_uring instances that don't use IORING_SETUP_SQPOLL.
252 * For SQPOLL, only the single threaded io_sq_thread() will
253 * manipulate the list, hence no extra locking is needed there.
254 */
255 struct list_head poll_list;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700256 struct list_head cancel_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700257 } ____cacheline_aligned_in_smp;
258
Jens Axboe31b51512019-01-18 22:56:34 -0700259 struct async_list pending_async[2];
260
Jens Axboe2b188cc2019-01-07 10:46:33 -0700261#if defined(CONFIG_UNIX)
262 struct socket *ring_sock;
263#endif
264};
265
266struct sqe_submit {
267 const struct io_uring_sqe *sqe;
268 unsigned short index;
Jackie Liu8776f3f2019-09-09 20:50:39 +0800269 u32 sequence;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700270 bool has_user;
Jens Axboedef596e2019-01-09 08:59:42 -0700271 bool needs_lock;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700272 bool needs_fixed_file;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700273};
274
Jens Axboe09bb8392019-03-13 12:39:28 -0600275/*
276 * First field must be the file pointer in all the
277 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
278 */
Jens Axboe221c5eb2019-01-17 09:41:58 -0700279struct io_poll_iocb {
280 struct file *file;
281 struct wait_queue_head *head;
282 __poll_t events;
Jens Axboe8c838782019-03-12 15:48:16 -0600283 bool done;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700284 bool canceled;
285 struct wait_queue_entry wait;
286};
287
Jens Axboe5262f562019-09-17 12:26:57 -0600288struct io_timeout {
289 struct file *file;
290 struct hrtimer timer;
291};
292
Jens Axboe09bb8392019-03-13 12:39:28 -0600293/*
294 * NOTE! Each of the iocb union members has the file pointer
295 * as the first entry in their struct definition. So you can
296 * access the file pointer through any of the sub-structs,
297 * or directly as just 'ki_filp' in this struct.
298 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700299struct io_kiocb {
Jens Axboe221c5eb2019-01-17 09:41:58 -0700300 union {
Jens Axboe09bb8392019-03-13 12:39:28 -0600301 struct file *file;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700302 struct kiocb rw;
303 struct io_poll_iocb poll;
Jens Axboe5262f562019-09-17 12:26:57 -0600304 struct io_timeout timeout;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700305 };
Jens Axboe2b188cc2019-01-07 10:46:33 -0700306
307 struct sqe_submit submit;
308
309 struct io_ring_ctx *ctx;
310 struct list_head list;
Jens Axboe9e645e112019-05-10 16:07:28 -0600311 struct list_head link_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700312 unsigned int flags;
Jens Axboec16361c2019-01-17 08:39:48 -0700313 refcount_t refs;
Stefan Bühler8449eed2019-04-27 20:34:19 +0200314#define REQ_F_NOWAIT 1 /* must not punt to workers */
Jens Axboedef596e2019-01-09 08:59:42 -0700315#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
Jens Axboe6b063142019-01-10 22:13:58 -0700316#define REQ_F_FIXED_FILE 4 /* ctx owns file */
Jens Axboe31b51512019-01-18 22:56:34 -0700317#define REQ_F_SEQ_PREV 8 /* sequential with previous */
Stefan Bühlere2033e32019-05-11 19:08:01 +0200318#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
319#define REQ_F_IO_DRAINED 32 /* drain done */
Jens Axboe9e645e112019-05-10 16:07:28 -0600320#define REQ_F_LINK 64 /* linked sqes */
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +0800321#define REQ_F_LINK_DONE 128 /* linked sqes done */
322#define REQ_F_FAIL_LINK 256 /* fail rest of links */
Jackie Liu4fe2c962019-09-09 20:50:40 +0800323#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
Jens Axboe5262f562019-09-17 12:26:57 -0600324#define REQ_F_TIMEOUT 1024 /* timeout request */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700325 u64 user_data;
Jens Axboe9e645e112019-05-10 16:07:28 -0600326 u32 result;
Jens Axboede0617e2019-04-06 21:51:27 -0600327 u32 sequence;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700328
329 struct work_struct work;
330};
331
332#define IO_PLUG_THRESHOLD 2
Jens Axboedef596e2019-01-09 08:59:42 -0700333#define IO_IOPOLL_BATCH 8
Jens Axboe2b188cc2019-01-07 10:46:33 -0700334
Jens Axboe9a56a232019-01-09 09:06:50 -0700335struct io_submit_state {
336 struct blk_plug plug;
337
338 /*
Jens Axboe2579f912019-01-09 09:10:43 -0700339 * io_kiocb alloc cache
340 */
341 void *reqs[IO_IOPOLL_BATCH];
342 unsigned int free_reqs;
343 unsigned int cur_req;
344
345 /*
Jens Axboe9a56a232019-01-09 09:06:50 -0700346 * File reference cache
347 */
348 struct file *file;
349 unsigned int fd;
350 unsigned int has_refs;
351 unsigned int used_refs;
352 unsigned int ios_left;
353};
354
Jens Axboede0617e2019-04-06 21:51:27 -0600355static void io_sq_wq_submit_work(struct work_struct *work);
Jens Axboe5262f562019-09-17 12:26:57 -0600356static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
357 long res);
Jackie Liu4fe2c962019-09-09 20:50:40 +0800358static void __io_free_req(struct io_kiocb *req);
Jens Axboede0617e2019-04-06 21:51:27 -0600359
Jens Axboe2b188cc2019-01-07 10:46:33 -0700360static struct kmem_cache *req_cachep;
361
362static const struct file_operations io_uring_fops;
363
364struct sock *io_uring_get_socket(struct file *file)
365{
366#if defined(CONFIG_UNIX)
367 if (file->f_op == &io_uring_fops) {
368 struct io_ring_ctx *ctx = file->private_data;
369
370 return ctx->ring_sock->sk;
371 }
372#endif
373 return NULL;
374}
375EXPORT_SYMBOL(io_uring_get_socket);
376
377static void io_ring_ctx_ref_free(struct percpu_ref *ref)
378{
379 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
380
381 complete(&ctx->ctx_done);
382}
383
384static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
385{
386 struct io_ring_ctx *ctx;
Jens Axboe31b51512019-01-18 22:56:34 -0700387 int i;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700388
389 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
390 if (!ctx)
391 return NULL;
392
Roman Gushchin21482892019-05-07 10:01:48 -0700393 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
394 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700395 kfree(ctx);
396 return NULL;
397 }
398
399 ctx->flags = p->flags;
400 init_waitqueue_head(&ctx->cq_wait);
401 init_completion(&ctx->ctx_done);
Jackie Liua4c0b3d2019-07-08 13:41:12 +0800402 init_completion(&ctx->sqo_thread_started);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700403 mutex_init(&ctx->uring_lock);
404 init_waitqueue_head(&ctx->wait);
Jens Axboe31b51512019-01-18 22:56:34 -0700405 for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
406 spin_lock_init(&ctx->pending_async[i].lock);
407 INIT_LIST_HEAD(&ctx->pending_async[i].list);
408 atomic_set(&ctx->pending_async[i].cnt, 0);
409 }
Jens Axboe2b188cc2019-01-07 10:46:33 -0700410 spin_lock_init(&ctx->completion_lock);
Jens Axboedef596e2019-01-09 08:59:42 -0700411 INIT_LIST_HEAD(&ctx->poll_list);
Jens Axboe221c5eb2019-01-17 09:41:58 -0700412 INIT_LIST_HEAD(&ctx->cancel_list);
Jens Axboede0617e2019-04-06 21:51:27 -0600413 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -0600414 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700415 return ctx;
416}
417
Jens Axboe7adf4ea2019-10-10 21:42:58 -0600418static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
419 struct io_kiocb *req)
Jens Axboede0617e2019-04-06 21:51:27 -0600420{
Hristo Venev75b28af2019-08-26 17:23:46 +0000421 return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
Jens Axboede0617e2019-04-06 21:51:27 -0600422}
423
Jens Axboe7adf4ea2019-10-10 21:42:58 -0600424static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
425 struct io_kiocb *req)
426{
427 if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
428 return false;
429
430 return __io_sequence_defer(ctx, req);
431}
432
433static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -0600434{
435 struct io_kiocb *req;
436
Jens Axboe7adf4ea2019-10-10 21:42:58 -0600437 req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
438 if (req && !io_sequence_defer(ctx, req)) {
Jens Axboede0617e2019-04-06 21:51:27 -0600439 list_del_init(&req->list);
440 return req;
441 }
442
443 return NULL;
444}
445
Jens Axboe5262f562019-09-17 12:26:57 -0600446static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
447{
Jens Axboe7adf4ea2019-10-10 21:42:58 -0600448 struct io_kiocb *req;
449
450 req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
451 if (req && !__io_sequence_defer(ctx, req)) {
452 list_del_init(&req->list);
453 return req;
454 }
455
456 return NULL;
Jens Axboe5262f562019-09-17 12:26:57 -0600457}
458
Jens Axboede0617e2019-04-06 21:51:27 -0600459static void __io_commit_cqring(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700460{
Hristo Venev75b28af2019-08-26 17:23:46 +0000461 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700462
Hristo Venev75b28af2019-08-26 17:23:46 +0000463 if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700464 /* order cqe stores with ring update */
Hristo Venev75b28af2019-08-26 17:23:46 +0000465 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700466
Jens Axboe2b188cc2019-01-07 10:46:33 -0700467 if (wq_has_sleeper(&ctx->cq_wait)) {
468 wake_up_interruptible(&ctx->cq_wait);
469 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
470 }
471 }
472}
473
Jens Axboe18d9be12019-09-10 09:13:05 -0600474static inline void io_queue_async_work(struct io_ring_ctx *ctx,
475 struct io_kiocb *req)
476{
Jens Axboe6cc47d12019-09-18 11:18:23 -0600477 int rw = 0;
Jens Axboe54a91f32019-09-10 09:15:04 -0600478
Jens Axboe6cc47d12019-09-18 11:18:23 -0600479 if (req->submit.sqe) {
480 switch (req->submit.sqe->opcode) {
481 case IORING_OP_WRITEV:
482 case IORING_OP_WRITE_FIXED:
483 rw = !(req->rw.ki_flags & IOCB_DIRECT);
484 break;
485 }
Jens Axboe54a91f32019-09-10 09:15:04 -0600486 }
487
488 queue_work(ctx->sqo_wq[rw], &req->work);
Jens Axboe18d9be12019-09-10 09:13:05 -0600489}
490
Jens Axboe5262f562019-09-17 12:26:57 -0600491static void io_kill_timeout(struct io_kiocb *req)
492{
493 int ret;
494
495 ret = hrtimer_try_to_cancel(&req->timeout.timer);
496 if (ret != -1) {
497 atomic_inc(&req->ctx->cq_timeouts);
498 list_del(&req->list);
499 io_cqring_fill_event(req->ctx, req->user_data, 0);
500 __io_free_req(req);
501 }
502}
503
504static void io_kill_timeouts(struct io_ring_ctx *ctx)
505{
506 struct io_kiocb *req, *tmp;
507
508 spin_lock_irq(&ctx->completion_lock);
509 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
510 io_kill_timeout(req);
511 spin_unlock_irq(&ctx->completion_lock);
512}
513
Jens Axboede0617e2019-04-06 21:51:27 -0600514static void io_commit_cqring(struct io_ring_ctx *ctx)
515{
516 struct io_kiocb *req;
517
Jens Axboe5262f562019-09-17 12:26:57 -0600518 while ((req = io_get_timeout_req(ctx)) != NULL)
519 io_kill_timeout(req);
520
Jens Axboede0617e2019-04-06 21:51:27 -0600521 __io_commit_cqring(ctx);
522
523 while ((req = io_get_deferred_req(ctx)) != NULL) {
Jackie Liu4fe2c962019-09-09 20:50:40 +0800524 if (req->flags & REQ_F_SHADOW_DRAIN) {
525 /* Just for drain, free it. */
526 __io_free_req(req);
527 continue;
528 }
Jens Axboede0617e2019-04-06 21:51:27 -0600529 req->flags |= REQ_F_IO_DRAINED;
Jens Axboe18d9be12019-09-10 09:13:05 -0600530 io_queue_async_work(ctx, req);
Jens Axboede0617e2019-04-06 21:51:27 -0600531 }
532}
533
Jens Axboe2b188cc2019-01-07 10:46:33 -0700534static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
535{
Hristo Venev75b28af2019-08-26 17:23:46 +0000536 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700537 unsigned tail;
538
539 tail = ctx->cached_cq_tail;
Stefan Bühler115e12e2019-04-24 23:54:18 +0200540 /*
541 * writes to the cq entry need to come after reading head; the
542 * control dependency is enough as we're using WRITE_ONCE to
543 * fill the cq entry
544 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000545 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700546 return NULL;
547
548 ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +0000549 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -0700550}
551
552static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
Jens Axboec71ffb62019-05-13 20:58:29 -0600553 long res)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700554{
555 struct io_uring_cqe *cqe;
556
557 /*
558 * If we can't get a cq entry, userspace overflowed the
559 * submission (by quite a lot). Increment the overflow count in
560 * the ring.
561 */
562 cqe = io_get_cqring(ctx);
563 if (cqe) {
564 WRITE_ONCE(cqe->user_data, ki_user_data);
565 WRITE_ONCE(cqe->res, res);
Jens Axboec71ffb62019-05-13 20:58:29 -0600566 WRITE_ONCE(cqe->flags, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700567 } else {
Hristo Venev75b28af2019-08-26 17:23:46 +0000568 unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700569
Hristo Venev75b28af2019-08-26 17:23:46 +0000570 WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700571 }
572}
573
Jens Axboe8c838782019-03-12 15:48:16 -0600574static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
575{
576 if (waitqueue_active(&ctx->wait))
577 wake_up(&ctx->wait);
578 if (waitqueue_active(&ctx->sqo_wait))
579 wake_up(&ctx->sqo_wait);
Jens Axboe9b402842019-04-11 11:45:41 -0600580 if (ctx->cq_ev_fd)
581 eventfd_signal(ctx->cq_ev_fd, 1);
Jens Axboe8c838782019-03-12 15:48:16 -0600582}
583
584static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
Jens Axboec71ffb62019-05-13 20:58:29 -0600585 long res)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700586{
587 unsigned long flags;
588
589 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboec71ffb62019-05-13 20:58:29 -0600590 io_cqring_fill_event(ctx, user_data, res);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700591 io_commit_cqring(ctx);
592 spin_unlock_irqrestore(&ctx->completion_lock, flags);
593
Jens Axboe8c838782019-03-12 15:48:16 -0600594 io_cqring_ev_posted(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700595}
596
Jens Axboe2579f912019-01-09 09:10:43 -0700597static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
598 struct io_submit_state *state)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700599{
Jens Axboefd6fab22019-03-14 16:30:06 -0600600 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700601 struct io_kiocb *req;
602
603 if (!percpu_ref_tryget(&ctx->refs))
604 return NULL;
605
Jens Axboe2579f912019-01-09 09:10:43 -0700606 if (!state) {
Jens Axboefd6fab22019-03-14 16:30:06 -0600607 req = kmem_cache_alloc(req_cachep, gfp);
Jens Axboe2579f912019-01-09 09:10:43 -0700608 if (unlikely(!req))
609 goto out;
610 } else if (!state->free_reqs) {
611 size_t sz;
612 int ret;
613
614 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
Jens Axboefd6fab22019-03-14 16:30:06 -0600615 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
616
617 /*
618 * Bulk alloc is all-or-nothing. If we fail to get a batch,
619 * retry single alloc to be on the safe side.
620 */
621 if (unlikely(ret <= 0)) {
622 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
623 if (!state->reqs[0])
624 goto out;
625 ret = 1;
626 }
Jens Axboe2579f912019-01-09 09:10:43 -0700627 state->free_reqs = ret - 1;
628 state->cur_req = 1;
629 req = state->reqs[0];
630 } else {
631 req = state->reqs[state->cur_req];
632 state->free_reqs--;
633 state->cur_req++;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700634 }
635
Jens Axboe60c112b2019-06-21 10:20:18 -0600636 req->file = NULL;
Jens Axboe2579f912019-01-09 09:10:43 -0700637 req->ctx = ctx;
638 req->flags = 0;
Jens Axboee65ef562019-03-12 10:16:44 -0600639 /* one is dropped after submission, the other at completion */
640 refcount_set(&req->refs, 2);
Jens Axboe9e645e112019-05-10 16:07:28 -0600641 req->result = 0;
Jens Axboe2579f912019-01-09 09:10:43 -0700642 return req;
643out:
Pavel Begunkov6805b322019-10-08 02:18:42 +0300644 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700645 return NULL;
646}
647
Jens Axboedef596e2019-01-09 08:59:42 -0700648static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
649{
650 if (*nr) {
651 kmem_cache_free_bulk(req_cachep, *nr, reqs);
Pavel Begunkov6805b322019-10-08 02:18:42 +0300652 percpu_ref_put_many(&ctx->refs, *nr);
Jens Axboedef596e2019-01-09 08:59:42 -0700653 *nr = 0;
654 }
655}
656
Jens Axboe9e645e112019-05-10 16:07:28 -0600657static void __io_free_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700658{
Jens Axboe09bb8392019-03-13 12:39:28 -0600659 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
660 fput(req->file);
Pavel Begunkov6805b322019-10-08 02:18:42 +0300661 percpu_ref_put(&req->ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -0600662 kmem_cache_free(req_cachep, req);
663}
664
Jens Axboe9e645e112019-05-10 16:07:28 -0600665static void io_req_link_next(struct io_kiocb *req)
666{
667 struct io_kiocb *nxt;
668
669 /*
670 * The list should never be empty when we are called here. But could
671 * potentially happen if the chain is messed up, check to be on the
672 * safe side.
673 */
674 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
675 if (nxt) {
676 list_del(&nxt->list);
677 if (!list_empty(&req->link_list)) {
678 INIT_LIST_HEAD(&nxt->link_list);
679 list_splice(&req->link_list, &nxt->link_list);
680 nxt->flags |= REQ_F_LINK;
681 }
682
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +0800683 nxt->flags |= REQ_F_LINK_DONE;
Jens Axboe9e645e112019-05-10 16:07:28 -0600684 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
Jens Axboe18d9be12019-09-10 09:13:05 -0600685 io_queue_async_work(req->ctx, nxt);
Jens Axboe9e645e112019-05-10 16:07:28 -0600686 }
687}
688
689/*
690 * Called if REQ_F_LINK is set, and we fail the head request
691 */
692static void io_fail_links(struct io_kiocb *req)
693{
694 struct io_kiocb *link;
695
696 while (!list_empty(&req->link_list)) {
697 link = list_first_entry(&req->link_list, struct io_kiocb, list);
698 list_del(&link->list);
699
700 io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
701 __io_free_req(link);
702 }
703}
704
705static void io_free_req(struct io_kiocb *req)
706{
707 /*
708 * If LINK is set, we have dependent requests in this chain. If we
709 * didn't fail this request, queue the first one up, moving any other
710 * dependencies to the next request. In case of failure, fail the rest
711 * of the chain.
712 */
713 if (req->flags & REQ_F_LINK) {
714 if (req->flags & REQ_F_FAIL_LINK)
715 io_fail_links(req);
716 else
717 io_req_link_next(req);
718 }
719
720 __io_free_req(req);
721}
722
Jens Axboee65ef562019-03-12 10:16:44 -0600723static void io_put_req(struct io_kiocb *req)
724{
725 if (refcount_dec_and_test(&req->refs))
726 io_free_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700727}
728
Hristo Venev75b28af2019-08-26 17:23:46 +0000729static unsigned io_cqring_events(struct io_rings *rings)
Jens Axboea3a0e432019-08-20 11:03:11 -0600730{
731 /* See comment at the top of this file */
732 smp_rmb();
Hristo Venev75b28af2019-08-26 17:23:46 +0000733 return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
Jens Axboea3a0e432019-08-20 11:03:11 -0600734}
735
Jens Axboedef596e2019-01-09 08:59:42 -0700736/*
737 * Find and free completed poll iocbs
738 */
739static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
740 struct list_head *done)
741{
742 void *reqs[IO_IOPOLL_BATCH];
743 struct io_kiocb *req;
Jens Axboe09bb8392019-03-13 12:39:28 -0600744 int to_free;
Jens Axboedef596e2019-01-09 08:59:42 -0700745
Jens Axboe09bb8392019-03-13 12:39:28 -0600746 to_free = 0;
Jens Axboedef596e2019-01-09 08:59:42 -0700747 while (!list_empty(done)) {
748 req = list_first_entry(done, struct io_kiocb, list);
749 list_del(&req->list);
750
Jens Axboe9e645e112019-05-10 16:07:28 -0600751 io_cqring_fill_event(ctx, req->user_data, req->result);
Jens Axboedef596e2019-01-09 08:59:42 -0700752 (*nr_events)++;
753
Jens Axboe09bb8392019-03-13 12:39:28 -0600754 if (refcount_dec_and_test(&req->refs)) {
755 /* If we're not using fixed files, we have to pair the
756 * completion part with the file put. Use regular
757 * completions for those, only batch free for fixed
Jens Axboe9e645e112019-05-10 16:07:28 -0600758 * file and non-linked commands.
Jens Axboe09bb8392019-03-13 12:39:28 -0600759 */
Jens Axboe9e645e112019-05-10 16:07:28 -0600760 if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
761 REQ_F_FIXED_FILE) {
Jens Axboe09bb8392019-03-13 12:39:28 -0600762 reqs[to_free++] = req;
763 if (to_free == ARRAY_SIZE(reqs))
764 io_free_req_many(ctx, reqs, &to_free);
Jens Axboe6b063142019-01-10 22:13:58 -0700765 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -0600766 io_free_req(req);
Jens Axboe6b063142019-01-10 22:13:58 -0700767 }
Jens Axboe9a56a232019-01-09 09:06:50 -0700768 }
Jens Axboedef596e2019-01-09 08:59:42 -0700769 }
Jens Axboedef596e2019-01-09 08:59:42 -0700770
Jens Axboe09bb8392019-03-13 12:39:28 -0600771 io_commit_cqring(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -0700772 io_free_req_many(ctx, reqs, &to_free);
773}
774
775static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
776 long min)
777{
778 struct io_kiocb *req, *tmp;
779 LIST_HEAD(done);
780 bool spin;
781 int ret;
782
783 /*
784 * Only spin for completions if we don't have multiple devices hanging
785 * off our complete list, and we're under the requested amount.
786 */
787 spin = !ctx->poll_multi_file && *nr_events < min;
788
789 ret = 0;
790 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
791 struct kiocb *kiocb = &req->rw;
792
793 /*
794 * Move completed entries to our local list. If we find a
795 * request that requires polling, break out and complete
796 * the done list first, if we have entries there.
797 */
798 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
799 list_move_tail(&req->list, &done);
800 continue;
801 }
802 if (!list_empty(&done))
803 break;
804
805 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
806 if (ret < 0)
807 break;
808
809 if (ret && spin)
810 spin = false;
811 ret = 0;
812 }
813
814 if (!list_empty(&done))
815 io_iopoll_complete(ctx, nr_events, &done);
816
817 return ret;
818}
819
820/*
821 * Poll for a mininum of 'min' events. Note that if min == 0 we consider that a
822 * non-spinning poll check - we'll still enter the driver poll loop, but only
823 * as a non-spinning completion check.
824 */
825static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
826 long min)
827{
Jens Axboe08f54392019-08-21 22:19:11 -0600828 while (!list_empty(&ctx->poll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -0700829 int ret;
830
831 ret = io_do_iopoll(ctx, nr_events, min);
832 if (ret < 0)
833 return ret;
834 if (!min || *nr_events >= min)
835 return 0;
836 }
837
838 return 1;
839}
840
841/*
842 * We can't just wait for polled events to come to us, we have to actively
843 * find and complete them.
844 */
845static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
846{
847 if (!(ctx->flags & IORING_SETUP_IOPOLL))
848 return;
849
850 mutex_lock(&ctx->uring_lock);
851 while (!list_empty(&ctx->poll_list)) {
852 unsigned int nr_events = 0;
853
854 io_iopoll_getevents(ctx, &nr_events, 1);
Jens Axboe08f54392019-08-21 22:19:11 -0600855
856 /*
857 * Ensure we allow local-to-the-cpu processing to take place,
858 * in this case we need to ensure that we reap all events.
859 */
860 cond_resched();
Jens Axboedef596e2019-01-09 08:59:42 -0700861 }
862 mutex_unlock(&ctx->uring_lock);
863}
864
865static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
866 long min)
867{
Jens Axboe500f9fb2019-08-19 12:15:59 -0600868 int iters, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -0700869
Jens Axboe500f9fb2019-08-19 12:15:59 -0600870 /*
871 * We disallow the app entering submit/complete with polling, but we
872 * still need to lock the ring to prevent racing with polled issue
873 * that got punted to a workqueue.
874 */
875 mutex_lock(&ctx->uring_lock);
876
877 iters = 0;
Jens Axboedef596e2019-01-09 08:59:42 -0700878 do {
879 int tmin = 0;
880
Jens Axboe500f9fb2019-08-19 12:15:59 -0600881 /*
Jens Axboea3a0e432019-08-20 11:03:11 -0600882 * Don't enter poll loop if we already have events pending.
883 * If we do, we can potentially be spinning for commands that
884 * already triggered a CQE (eg in error).
885 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000886 if (io_cqring_events(ctx->rings))
Jens Axboea3a0e432019-08-20 11:03:11 -0600887 break;
888
889 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -0600890 * If a submit got punted to a workqueue, we can have the
891 * application entering polling for a command before it gets
892 * issued. That app will hold the uring_lock for the duration
893 * of the poll right here, so we need to take a breather every
894 * now and then to ensure that the issue has a chance to add
895 * the poll to the issued list. Otherwise we can spin here
896 * forever, while the workqueue is stuck trying to acquire the
897 * very same mutex.
898 */
899 if (!(++iters & 7)) {
900 mutex_unlock(&ctx->uring_lock);
901 mutex_lock(&ctx->uring_lock);
902 }
903
Jens Axboedef596e2019-01-09 08:59:42 -0700904 if (*nr_events < min)
905 tmin = min - *nr_events;
906
907 ret = io_iopoll_getevents(ctx, nr_events, tmin);
908 if (ret <= 0)
909 break;
910 ret = 0;
911 } while (min && !*nr_events && !need_resched());
912
Jens Axboe500f9fb2019-08-19 12:15:59 -0600913 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -0700914 return ret;
915}
916
Jens Axboe2b188cc2019-01-07 10:46:33 -0700917static void kiocb_end_write(struct kiocb *kiocb)
918{
919 if (kiocb->ki_flags & IOCB_WRITE) {
920 struct inode *inode = file_inode(kiocb->ki_filp);
921
922 /*
923 * Tell lockdep we inherited freeze protection from submission
924 * thread.
925 */
926 if (S_ISREG(inode->i_mode))
927 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
928 file_end_write(kiocb->ki_filp);
929 }
930}
931
932static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
933{
934 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
935
936 kiocb_end_write(kiocb);
937
Jens Axboe9e645e112019-05-10 16:07:28 -0600938 if ((req->flags & REQ_F_LINK) && res != req->result)
939 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -0600940 io_cqring_add_event(req->ctx, req->user_data, res);
Jens Axboee65ef562019-03-12 10:16:44 -0600941 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700942}
943
Jens Axboedef596e2019-01-09 08:59:42 -0700944static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
945{
946 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
947
948 kiocb_end_write(kiocb);
949
Jens Axboe9e645e112019-05-10 16:07:28 -0600950 if ((req->flags & REQ_F_LINK) && res != req->result)
951 req->flags |= REQ_F_FAIL_LINK;
952 req->result = res;
Jens Axboedef596e2019-01-09 08:59:42 -0700953 if (res != -EAGAIN)
954 req->flags |= REQ_F_IOPOLL_COMPLETED;
955}
956
957/*
958 * After the iocb has been issued, it's safe to be found on the poll list.
959 * Adding the kiocb to the list AFTER submission ensures that we don't
960 * find it from a io_iopoll_getevents() thread before the issuer is done
961 * accessing the kiocb cookie.
962 */
963static void io_iopoll_req_issued(struct io_kiocb *req)
964{
965 struct io_ring_ctx *ctx = req->ctx;
966
967 /*
968 * Track whether we have multiple files in our lists. This will impact
969 * how we do polling eventually, not spinning if we're on potentially
970 * different devices.
971 */
972 if (list_empty(&ctx->poll_list)) {
973 ctx->poll_multi_file = false;
974 } else if (!ctx->poll_multi_file) {
975 struct io_kiocb *list_req;
976
977 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
978 list);
979 if (list_req->rw.ki_filp != req->rw.ki_filp)
980 ctx->poll_multi_file = true;
981 }
982
983 /*
984 * For fast devices, IO may have already completed. If it has, add
985 * it to the front so we find it first.
986 */
987 if (req->flags & REQ_F_IOPOLL_COMPLETED)
988 list_add(&req->list, &ctx->poll_list);
989 else
990 list_add_tail(&req->list, &ctx->poll_list);
991}
992
Jens Axboe3d6770f2019-04-13 11:50:54 -0600993static void io_file_put(struct io_submit_state *state)
Jens Axboe9a56a232019-01-09 09:06:50 -0700994{
Jens Axboe3d6770f2019-04-13 11:50:54 -0600995 if (state->file) {
Jens Axboe9a56a232019-01-09 09:06:50 -0700996 int diff = state->has_refs - state->used_refs;
997
998 if (diff)
999 fput_many(state->file, diff);
1000 state->file = NULL;
1001 }
1002}
1003
1004/*
1005 * Get as many references to a file as we have IOs left in this submission,
1006 * assuming most submissions are for one file, or at least that each file
1007 * has more than one submission.
1008 */
1009static struct file *io_file_get(struct io_submit_state *state, int fd)
1010{
1011 if (!state)
1012 return fget(fd);
1013
1014 if (state->file) {
1015 if (state->fd == fd) {
1016 state->used_refs++;
1017 state->ios_left--;
1018 return state->file;
1019 }
Jens Axboe3d6770f2019-04-13 11:50:54 -06001020 io_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07001021 }
1022 state->file = fget_many(fd, state->ios_left);
1023 if (!state->file)
1024 return NULL;
1025
1026 state->fd = fd;
1027 state->has_refs = state->ios_left;
1028 state->used_refs = 1;
1029 state->ios_left--;
1030 return state->file;
1031}
1032
Jens Axboe2b188cc2019-01-07 10:46:33 -07001033/*
1034 * If we tracked the file through the SCM inflight mechanism, we could support
1035 * any file. For now, just ensure that anything potentially problematic is done
1036 * inline.
1037 */
1038static bool io_file_supports_async(struct file *file)
1039{
1040 umode_t mode = file_inode(file)->i_mode;
1041
1042 if (S_ISBLK(mode) || S_ISCHR(mode))
1043 return true;
1044 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1045 return true;
1046
1047 return false;
1048}
1049
Jens Axboe6c271ce2019-01-10 11:22:30 -07001050static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001051 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001052{
Jens Axboe6c271ce2019-01-10 11:22:30 -07001053 const struct io_uring_sqe *sqe = s->sqe;
Jens Axboedef596e2019-01-09 08:59:42 -07001054 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001055 struct kiocb *kiocb = &req->rw;
Jens Axboe09bb8392019-03-13 12:39:28 -06001056 unsigned ioprio;
1057 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001058
Jens Axboe09bb8392019-03-13 12:39:28 -06001059 if (!req->file)
1060 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001061
Jens Axboe09bb8392019-03-13 12:39:28 -06001062 if (force_nonblock && !io_file_supports_async(req->file))
1063 force_nonblock = false;
Jens Axboe6b063142019-01-10 22:13:58 -07001064
Jens Axboe2b188cc2019-01-07 10:46:33 -07001065 kiocb->ki_pos = READ_ONCE(sqe->off);
1066 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
1067 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
1068
1069 ioprio = READ_ONCE(sqe->ioprio);
1070 if (ioprio) {
1071 ret = ioprio_check_cap(ioprio);
1072 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06001073 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001074
1075 kiocb->ki_ioprio = ioprio;
1076 } else
1077 kiocb->ki_ioprio = get_current_ioprio();
1078
1079 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
1080 if (unlikely(ret))
Jens Axboe09bb8392019-03-13 12:39:28 -06001081 return ret;
Stefan Bühler8449eed2019-04-27 20:34:19 +02001082
1083 /* don't allow async punt if RWF_NOWAIT was requested */
1084 if (kiocb->ki_flags & IOCB_NOWAIT)
1085 req->flags |= REQ_F_NOWAIT;
1086
1087 if (force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001088 kiocb->ki_flags |= IOCB_NOWAIT;
Stefan Bühler8449eed2019-04-27 20:34:19 +02001089
Jens Axboedef596e2019-01-09 08:59:42 -07001090 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07001091 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1092 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06001093 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001094
Jens Axboedef596e2019-01-09 08:59:42 -07001095 kiocb->ki_flags |= IOCB_HIPRI;
1096 kiocb->ki_complete = io_complete_rw_iopoll;
1097 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06001098 if (kiocb->ki_flags & IOCB_HIPRI)
1099 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07001100 kiocb->ki_complete = io_complete_rw;
1101 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001102 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001103}
1104
1105static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1106{
1107 switch (ret) {
1108 case -EIOCBQUEUED:
1109 break;
1110 case -ERESTARTSYS:
1111 case -ERESTARTNOINTR:
1112 case -ERESTARTNOHAND:
1113 case -ERESTART_RESTARTBLOCK:
1114 /*
1115 * We can't just restart the syscall, since previously
1116 * submitted sqes may already be in progress. Just fail this
1117 * IO with EINTR.
1118 */
1119 ret = -EINTR;
1120 /* fall through */
1121 default:
1122 kiocb->ki_complete(kiocb, ret, 0);
1123 }
1124}
1125
Jens Axboeedafcce2019-01-09 09:16:05 -07001126static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1127 const struct io_uring_sqe *sqe,
1128 struct iov_iter *iter)
1129{
1130 size_t len = READ_ONCE(sqe->len);
1131 struct io_mapped_ubuf *imu;
1132 unsigned index, buf_index;
1133 size_t offset;
1134 u64 buf_addr;
1135
1136 /* attempt to use fixed buffers without having provided iovecs */
1137 if (unlikely(!ctx->user_bufs))
1138 return -EFAULT;
1139
1140 buf_index = READ_ONCE(sqe->buf_index);
1141 if (unlikely(buf_index >= ctx->nr_user_bufs))
1142 return -EFAULT;
1143
1144 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1145 imu = &ctx->user_bufs[index];
1146 buf_addr = READ_ONCE(sqe->addr);
1147
1148 /* overflow */
1149 if (buf_addr + len < buf_addr)
1150 return -EFAULT;
1151 /* not inside the mapped region */
1152 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1153 return -EFAULT;
1154
1155 /*
1156 * May not be a start of buffer, set size appropriately
1157 * and advance us to the beginning.
1158 */
1159 offset = buf_addr - imu->ubuf;
1160 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06001161
1162 if (offset) {
1163 /*
1164 * Don't use iov_iter_advance() here, as it's really slow for
1165 * using the latter parts of a big fixed buffer - it iterates
1166 * over each segment manually. We can cheat a bit here, because
1167 * we know that:
1168 *
1169 * 1) it's a BVEC iter, we set it up
1170 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1171 * first and last bvec
1172 *
1173 * So just find our index, and adjust the iterator afterwards.
1174 * If the offset is within the first bvec (or the whole first
1175 * bvec, just use iov_iter_advance(). This makes it easier
1176 * since we can just skip the first segment, which may not
1177 * be PAGE_SIZE aligned.
1178 */
1179 const struct bio_vec *bvec = imu->bvec;
1180
1181 if (offset <= bvec->bv_len) {
1182 iov_iter_advance(iter, offset);
1183 } else {
1184 unsigned long seg_skip;
1185
1186 /* skip first vec */
1187 offset -= bvec->bv_len;
1188 seg_skip = 1 + (offset >> PAGE_SHIFT);
1189
1190 iter->bvec = bvec + seg_skip;
1191 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02001192 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06001193 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06001194 }
1195 }
1196
Jens Axboeedafcce2019-01-09 09:16:05 -07001197 return 0;
1198}
1199
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001200static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1201 const struct sqe_submit *s, struct iovec **iovec,
1202 struct iov_iter *iter)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001203{
1204 const struct io_uring_sqe *sqe = s->sqe;
1205 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1206 size_t sqe_len = READ_ONCE(sqe->len);
Jens Axboeedafcce2019-01-09 09:16:05 -07001207 u8 opcode;
1208
1209 /*
1210 * We're reading ->opcode for the second time, but the first read
1211 * doesn't care whether it's _FIXED or not, so it doesn't matter
1212 * whether ->opcode changes concurrently. The first read does care
1213 * about whether it is a READ or a WRITE, so we don't trust this read
1214 * for that purpose and instead let the caller pass in the read/write
1215 * flag.
1216 */
1217 opcode = READ_ONCE(sqe->opcode);
1218 if (opcode == IORING_OP_READ_FIXED ||
1219 opcode == IORING_OP_WRITE_FIXED) {
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001220 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07001221 *iovec = NULL;
1222 return ret;
1223 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001224
1225 if (!s->has_user)
1226 return -EFAULT;
1227
1228#ifdef CONFIG_COMPAT
1229 if (ctx->compat)
1230 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1231 iovec, iter);
1232#endif
1233
1234 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1235}
1236
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001237static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
1238{
1239 if (al->file == kiocb->ki_filp) {
1240 off_t start, end;
1241
1242 /*
1243 * Allow merging if we're anywhere in the range of the same
1244 * page. Generally this happens for sub-page reads or writes,
1245 * and it's beneficial to allow the first worker to bring the
1246 * page in and the piggy backed work can then work on the
1247 * cached page.
1248 */
1249 start = al->io_start & PAGE_MASK;
1250 end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
1251 if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
1252 return true;
1253 }
1254
1255 al->file = NULL;
1256 return false;
1257}
1258
Jens Axboe31b51512019-01-18 22:56:34 -07001259/*
1260 * Make a note of the last file/offset/direction we punted to async
1261 * context. We'll use this information to see if we can piggy back a
1262 * sequential request onto the previous one, if it's still hasn't been
1263 * completed by the async worker.
1264 */
1265static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
1266{
1267 struct async_list *async_list = &req->ctx->pending_async[rw];
1268 struct kiocb *kiocb = &req->rw;
1269 struct file *filp = kiocb->ki_filp;
Jens Axboe31b51512019-01-18 22:56:34 -07001270
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001271 if (io_should_merge(async_list, kiocb)) {
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001272 unsigned long max_bytes;
Jens Axboe31b51512019-01-18 22:56:34 -07001273
1274 /* Use 8x RA size as a decent limiter for both reads/writes */
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001275 max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
1276 if (!max_bytes)
1277 max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
Jens Axboe31b51512019-01-18 22:56:34 -07001278
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001279 /* If max len are exceeded, reset the state */
1280 if (async_list->io_len + len <= max_bytes) {
Jens Axboe31b51512019-01-18 22:56:34 -07001281 req->flags |= REQ_F_SEQ_PREV;
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001282 async_list->io_len += len;
Jens Axboe31b51512019-01-18 22:56:34 -07001283 } else {
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001284 async_list->file = NULL;
Jens Axboe31b51512019-01-18 22:56:34 -07001285 }
1286 }
1287
1288 /* New file? Reset state. */
1289 if (async_list->file != filp) {
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06001290 async_list->io_start = kiocb->ki_pos;
1291 async_list->io_len = len;
Jens Axboe31b51512019-01-18 22:56:34 -07001292 async_list->file = filp;
1293 }
Jens Axboe31b51512019-01-18 22:56:34 -07001294}
1295
Jens Axboe32960612019-09-23 11:05:34 -06001296/*
1297 * For files that don't have ->read_iter() and ->write_iter(), handle them
1298 * by looping over ->read() or ->write() manually.
1299 */
1300static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
1301 struct iov_iter *iter)
1302{
1303 ssize_t ret = 0;
1304
1305 /*
1306 * Don't support polled IO through this interface, and we can't
1307 * support non-blocking either. For the latter, this just causes
1308 * the kiocb to be handled from an async context.
1309 */
1310 if (kiocb->ki_flags & IOCB_HIPRI)
1311 return -EOPNOTSUPP;
1312 if (kiocb->ki_flags & IOCB_NOWAIT)
1313 return -EAGAIN;
1314
1315 while (iov_iter_count(iter)) {
1316 struct iovec iovec = iov_iter_iovec(iter);
1317 ssize_t nr;
1318
1319 if (rw == READ) {
1320 nr = file->f_op->read(file, iovec.iov_base,
1321 iovec.iov_len, &kiocb->ki_pos);
1322 } else {
1323 nr = file->f_op->write(file, iovec.iov_base,
1324 iovec.iov_len, &kiocb->ki_pos);
1325 }
1326
1327 if (nr < 0) {
1328 if (!ret)
1329 ret = nr;
1330 break;
1331 }
1332 ret += nr;
1333 if (nr != iovec.iov_len)
1334 break;
1335 iov_iter_advance(iter, nr);
1336 }
1337
1338 return ret;
1339}
1340
Jens Axboee0c5c572019-03-12 10:18:47 -06001341static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001342 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001343{
1344 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1345 struct kiocb *kiocb = &req->rw;
1346 struct iov_iter iter;
1347 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001348 size_t iov_count;
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001349 ssize_t read_size, ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001350
Jens Axboe8358e3a2019-04-23 08:17:58 -06001351 ret = io_prep_rw(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001352 if (ret)
1353 return ret;
1354 file = kiocb->ki_filp;
1355
Jens Axboe2b188cc2019-01-07 10:46:33 -07001356 if (unlikely(!(file->f_mode & FMODE_READ)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001357 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001358
1359 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001360 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001361 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001362
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001363 read_size = ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06001364 if (req->flags & REQ_F_LINK)
1365 req->result = read_size;
1366
Jens Axboe31b51512019-01-18 22:56:34 -07001367 iov_count = iov_iter_count(&iter);
1368 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001369 if (!ret) {
1370 ssize_t ret2;
1371
Jens Axboe32960612019-09-23 11:05:34 -06001372 if (file->f_op->read_iter)
1373 ret2 = call_read_iter(file, kiocb, &iter);
1374 else
1375 ret2 = loop_rw_iter(READ, file, kiocb, &iter);
1376
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001377 /*
1378 * In case of a short read, punt to async. This can happen
1379 * if we have data partially cached. Alternatively we can
1380 * return the short read, in which case the application will
1381 * need to issue another SQE and wait for it. That SQE will
1382 * need async punt anyway, so it's more efficient to do it
1383 * here.
1384 */
1385 if (force_nonblock && ret2 > 0 && ret2 < read_size)
1386 ret2 = -EAGAIN;
1387 /* Catch -EAGAIN return for forced non-blocking submission */
Jens Axboe31b51512019-01-18 22:56:34 -07001388 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07001389 io_rw_done(kiocb, ret2);
Jens Axboe31b51512019-01-18 22:56:34 -07001390 } else {
1391 /*
1392 * If ->needs_lock is true, we're already in async
1393 * context.
1394 */
1395 if (!s->needs_lock)
1396 io_async_list_note(READ, req, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001397 ret = -EAGAIN;
Jens Axboe31b51512019-01-18 22:56:34 -07001398 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001399 }
1400 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001401 return ret;
1402}
1403
Jens Axboee0c5c572019-03-12 10:18:47 -06001404static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001405 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001406{
1407 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1408 struct kiocb *kiocb = &req->rw;
1409 struct iov_iter iter;
1410 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001411 size_t iov_count;
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001412 ssize_t ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001413
Jens Axboe8358e3a2019-04-23 08:17:58 -06001414 ret = io_prep_rw(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001415 if (ret)
1416 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001417
Jens Axboe2b188cc2019-01-07 10:46:33 -07001418 file = kiocb->ki_filp;
1419 if (unlikely(!(file->f_mode & FMODE_WRITE)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001420 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001421
1422 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001423 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001424 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001425
Jens Axboe9e645e112019-05-10 16:07:28 -06001426 if (req->flags & REQ_F_LINK)
1427 req->result = ret;
1428
Jens Axboe31b51512019-01-18 22:56:34 -07001429 iov_count = iov_iter_count(&iter);
1430
1431 ret = -EAGAIN;
1432 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
1433 /* If ->needs_lock is true, we're already in async context. */
1434 if (!s->needs_lock)
1435 io_async_list_note(WRITE, req, iov_count);
1436 goto out_free;
1437 }
1438
1439 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001440 if (!ret) {
Roman Penyaev9bf79332019-03-25 20:09:24 +01001441 ssize_t ret2;
1442
Jens Axboe2b188cc2019-01-07 10:46:33 -07001443 /*
1444 * Open-code file_start_write here to grab freeze protection,
1445 * which will be released by another thread in
1446 * io_complete_rw(). Fool lockdep by telling it the lock got
1447 * released so that it doesn't complain about the held lock when
1448 * we return to userspace.
1449 */
1450 if (S_ISREG(file_inode(file)->i_mode)) {
1451 __sb_start_write(file_inode(file)->i_sb,
1452 SB_FREEZE_WRITE, true);
1453 __sb_writers_release(file_inode(file)->i_sb,
1454 SB_FREEZE_WRITE);
1455 }
1456 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01001457
Jens Axboe32960612019-09-23 11:05:34 -06001458 if (file->f_op->write_iter)
1459 ret2 = call_write_iter(file, kiocb, &iter);
1460 else
1461 ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
Roman Penyaev9bf79332019-03-25 20:09:24 +01001462 if (!force_nonblock || ret2 != -EAGAIN) {
1463 io_rw_done(kiocb, ret2);
1464 } else {
1465 /*
1466 * If ->needs_lock is true, we're already in async
1467 * context.
1468 */
1469 if (!s->needs_lock)
1470 io_async_list_note(WRITE, req, iov_count);
1471 ret = -EAGAIN;
1472 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001473 }
Jens Axboe31b51512019-01-18 22:56:34 -07001474out_free:
Jens Axboe2b188cc2019-01-07 10:46:33 -07001475 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001476 return ret;
1477}
1478
1479/*
1480 * IORING_OP_NOP just posts a completion event, nothing else.
1481 */
1482static int io_nop(struct io_kiocb *req, u64 user_data)
1483{
1484 struct io_ring_ctx *ctx = req->ctx;
1485 long err = 0;
1486
Jens Axboedef596e2019-01-09 08:59:42 -07001487 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1488 return -EINVAL;
1489
Jens Axboec71ffb62019-05-13 20:58:29 -06001490 io_cqring_add_event(ctx, user_data, err);
Jens Axboee65ef562019-03-12 10:16:44 -06001491 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001492 return 0;
1493}
1494
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001495static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1496{
Jens Axboe6b063142019-01-10 22:13:58 -07001497 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001498
Jens Axboe09bb8392019-03-13 12:39:28 -06001499 if (!req->file)
1500 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001501
Jens Axboe6b063142019-01-10 22:13:58 -07001502 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07001503 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07001504 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001505 return -EINVAL;
1506
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001507 return 0;
1508}
1509
1510static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1511 bool force_nonblock)
1512{
1513 loff_t sqe_off = READ_ONCE(sqe->off);
1514 loff_t sqe_len = READ_ONCE(sqe->len);
1515 loff_t end = sqe_off + sqe_len;
1516 unsigned fsync_flags;
1517 int ret;
1518
1519 fsync_flags = READ_ONCE(sqe->fsync_flags);
1520 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1521 return -EINVAL;
1522
1523 ret = io_prep_fsync(req, sqe);
1524 if (ret)
1525 return ret;
1526
1527 /* fsync always requires a blocking context */
1528 if (force_nonblock)
1529 return -EAGAIN;
1530
1531 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1532 end > 0 ? end : LLONG_MAX,
1533 fsync_flags & IORING_FSYNC_DATASYNC);
1534
Jens Axboe9e645e112019-05-10 16:07:28 -06001535 if (ret < 0 && (req->flags & REQ_F_LINK))
1536 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -06001537 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06001538 io_put_req(req);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001539 return 0;
1540}
1541
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001542static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1543{
1544 struct io_ring_ctx *ctx = req->ctx;
1545 int ret = 0;
1546
1547 if (!req->file)
1548 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001549
1550 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1551 return -EINVAL;
1552 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1553 return -EINVAL;
1554
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001555 return ret;
1556}
1557
1558static int io_sync_file_range(struct io_kiocb *req,
1559 const struct io_uring_sqe *sqe,
1560 bool force_nonblock)
1561{
1562 loff_t sqe_off;
1563 loff_t sqe_len;
1564 unsigned flags;
1565 int ret;
1566
1567 ret = io_prep_sfr(req, sqe);
1568 if (ret)
1569 return ret;
1570
1571 /* sync_file_range always requires a blocking context */
1572 if (force_nonblock)
1573 return -EAGAIN;
1574
1575 sqe_off = READ_ONCE(sqe->off);
1576 sqe_len = READ_ONCE(sqe->len);
1577 flags = READ_ONCE(sqe->sync_range_flags);
1578
1579 ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1580
Jens Axboe9e645e112019-05-10 16:07:28 -06001581 if (ret < 0 && (req->flags & REQ_F_LINK))
1582 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -06001583 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001584 io_put_req(req);
1585 return 0;
1586}
1587
Jens Axboe0fa03c62019-04-19 13:34:07 -06001588#if defined(CONFIG_NET)
Jens Axboeaa1fa282019-04-19 13:38:09 -06001589static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1590 bool force_nonblock,
1591 long (*fn)(struct socket *, struct user_msghdr __user *,
1592 unsigned int))
1593{
Jens Axboe0fa03c62019-04-19 13:34:07 -06001594 struct socket *sock;
1595 int ret;
1596
1597 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1598 return -EINVAL;
1599
1600 sock = sock_from_file(req->file, &ret);
1601 if (sock) {
1602 struct user_msghdr __user *msg;
1603 unsigned flags;
1604
1605 flags = READ_ONCE(sqe->msg_flags);
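		/*
		 * MSG_DONTWAIT from the app means this request must never be
		 * punted to async context; otherwise add MSG_DONTWAIT for a
		 * forced non-blocking attempt so we see -EAGAIN instead of
		 * sleeping.
		 */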
1606 if (flags & MSG_DONTWAIT)
1607 req->flags |= REQ_F_NOWAIT;
1608 else if (force_nonblock)
1609 flags |= MSG_DONTWAIT;
1610
1611 msg = (struct user_msghdr __user *) (unsigned long)
1612 READ_ONCE(sqe->addr);
1613
Jens Axboeaa1fa282019-04-19 13:38:09 -06001614 ret = fn(sock, msg, flags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001615 if (force_nonblock && ret == -EAGAIN)
1616 return ret;
1617 }
1618
1619 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1620 io_put_req(req);
1621 return 0;
Jens Axboeaa1fa282019-04-19 13:38:09 -06001622}
1623#endif
1624
1625static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1626 bool force_nonblock)
1627{
1628#if defined(CONFIG_NET)
1629 return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
1630#else
1631 return -EOPNOTSUPP;
1632#endif
1633}
1634
1635static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1636 bool force_nonblock)
1637{
1638#if defined(CONFIG_NET)
1639 return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001640#else
1641 return -EOPNOTSUPP;
1642#endif
1643}
1644
Jens Axboe221c5eb2019-01-17 09:41:58 -07001645static void io_poll_remove_one(struct io_kiocb *req)
1646{
1647 struct io_poll_iocb *poll = &req->poll;
1648
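	/*
	 * Mark the poll request canceled and, if it is still on the
	 * waitqueue, remove it and queue async work so the completion is
	 * posted from io_poll_complete_work().
	 */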
1649 spin_lock(&poll->head->lock);
1650 WRITE_ONCE(poll->canceled, true);
1651 if (!list_empty(&poll->wait.entry)) {
1652 list_del_init(&poll->wait.entry);
Jens Axboe18d9be12019-09-10 09:13:05 -06001653 io_queue_async_work(req->ctx, req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001654 }
1655 spin_unlock(&poll->head->lock);
1656
1657 list_del_init(&req->list);
1658}
1659
1660static void io_poll_remove_all(struct io_ring_ctx *ctx)
1661{
1662 struct io_kiocb *req;
1663
1664 spin_lock_irq(&ctx->completion_lock);
1665 while (!list_empty(&ctx->cancel_list)) {
1666 req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
1667 io_poll_remove_one(req);
1668 }
1669 spin_unlock_irq(&ctx->completion_lock);
1670}
1671
1672/*
1673 * Find a running poll command that matches one specified in sqe->addr,
1674 * and remove it if found.
1675 */
1676static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1677{
1678 struct io_ring_ctx *ctx = req->ctx;
1679 struct io_kiocb *poll_req, *next;
1680 int ret = -ENOENT;
1681
1682 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1683 return -EINVAL;
1684 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1685 sqe->poll_events)
1686 return -EINVAL;
1687
1688 spin_lock_irq(&ctx->completion_lock);
1689 list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1690 if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1691 io_poll_remove_one(poll_req);
1692 ret = 0;
1693 break;
1694 }
1695 }
1696 spin_unlock_irq(&ctx->completion_lock);
1697
Jens Axboec71ffb62019-05-13 20:58:29 -06001698 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06001699 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001700 return 0;
1701}
1702
Jens Axboe8c838782019-03-12 15:48:16 -06001703static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1704 __poll_t mask)
Jens Axboe221c5eb2019-01-17 09:41:58 -07001705{
Jens Axboe8c838782019-03-12 15:48:16 -06001706 req->poll.done = true;
Jens Axboec71ffb62019-05-13 20:58:29 -06001707 io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
Jens Axboe8c838782019-03-12 15:48:16 -06001708 io_commit_cqring(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001709}
1710
1711static void io_poll_complete_work(struct work_struct *work)
1712{
1713 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1714 struct io_poll_iocb *poll = &req->poll;
1715 struct poll_table_struct pt = { ._key = poll->events };
1716 struct io_ring_ctx *ctx = req->ctx;
1717 __poll_t mask = 0;
1718
1719 if (!READ_ONCE(poll->canceled))
1720 mask = vfs_poll(poll->file, &pt) & poll->events;
1721
1722 /*
1723 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1724 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1725 * synchronize with them. In the cancellation case the list_del_init
1726 * itself is not actually needed, but harmless so we keep it in to
1727 * avoid further branches in the fast path.
1728 */
1729 spin_lock_irq(&ctx->completion_lock);
1730 if (!mask && !READ_ONCE(poll->canceled)) {
1731 add_wait_queue(poll->head, &poll->wait);
1732 spin_unlock_irq(&ctx->completion_lock);
1733 return;
1734 }
1735 list_del_init(&req->list);
Jens Axboe8c838782019-03-12 15:48:16 -06001736 io_poll_complete(ctx, req, mask);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001737 spin_unlock_irq(&ctx->completion_lock);
1738
Jens Axboe8c838782019-03-12 15:48:16 -06001739 io_cqring_ev_posted(ctx);
1740 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001741}
1742
1743static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1744 void *key)
1745{
1746 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1747 wait);
1748 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1749 struct io_ring_ctx *ctx = req->ctx;
1750 __poll_t mask = key_to_poll(key);
Jens Axboe8c838782019-03-12 15:48:16 -06001751 unsigned long flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001752
1753 /* for instances that support it check for an event match first: */
Jens Axboe8c838782019-03-12 15:48:16 -06001754 if (mask && !(mask & poll->events))
1755 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001756
1757 list_del_init(&poll->wait.entry);
Jens Axboe8c838782019-03-12 15:48:16 -06001758
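	/*
	 * Complete inline if we have an event and can take the completion
	 * lock without spinning in the waitqueue callback; otherwise punt
	 * to async work, which will re-poll and post the completion.
	 */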
1759 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1760 list_del(&req->list);
1761 io_poll_complete(ctx, req, mask);
1762 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1763
1764 io_cqring_ev_posted(ctx);
1765 io_put_req(req);
1766 } else {
Jens Axboe18d9be12019-09-10 09:13:05 -06001767 io_queue_async_work(ctx, req);
Jens Axboe8c838782019-03-12 15:48:16 -06001768 }
1769
Jens Axboe221c5eb2019-01-17 09:41:58 -07001770 return 1;
1771}
1772
1773struct io_poll_table {
1774 struct poll_table_struct pt;
1775 struct io_kiocb *req;
1776 int error;
1777};
1778
1779static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1780 struct poll_table_struct *p)
1781{
1782 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1783
1784 if (unlikely(pt->req->poll.head)) {
1785 pt->error = -EINVAL;
1786 return;
1787 }
1788
1789 pt->error = 0;
1790 pt->req->poll.head = head;
1791 add_wait_queue(head, &pt->req->poll.wait);
1792}
1793
1794static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1795{
1796 struct io_poll_iocb *poll = &req->poll;
1797 struct io_ring_ctx *ctx = req->ctx;
1798 struct io_poll_table ipt;
Jens Axboe8c838782019-03-12 15:48:16 -06001799 bool cancel = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001800 __poll_t mask;
1801 u16 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001802
1803 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1804 return -EINVAL;
1805 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1806 return -EINVAL;
Jens Axboe09bb8392019-03-13 12:39:28 -06001807 if (!poll->file)
1808 return -EBADF;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001809
Jens Axboe6cc47d12019-09-18 11:18:23 -06001810 req->submit.sqe = NULL;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001811 INIT_WORK(&req->work, io_poll_complete_work);
1812 events = READ_ONCE(sqe->poll_events);
1813 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1814
Jens Axboe221c5eb2019-01-17 09:41:58 -07001815 poll->head = NULL;
Jens Axboe8c838782019-03-12 15:48:16 -06001816 poll->done = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001817 poll->canceled = false;
1818
1819 ipt.pt._qproc = io_poll_queue_proc;
1820 ipt.pt._key = poll->events;
1821 ipt.req = req;
1822 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1823
1824	/* initialize the list so that we can do list_empty checks */
1825 INIT_LIST_HEAD(&poll->wait.entry);
1826 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1827
Jens Axboe36703242019-07-25 10:20:18 -06001828 INIT_LIST_HEAD(&req->list);
1829
Jens Axboe221c5eb2019-01-17 09:41:58 -07001830 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001831
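	/*
	 * vfs_poll() may have queued us on the waitqueue and/or returned a
	 * ready mask; decide under the locks whether we completed inline,
	 * got canceled, or are now armed and tracked on cancel_list.
	 */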
1832 spin_lock_irq(&ctx->completion_lock);
Jens Axboe8c838782019-03-12 15:48:16 -06001833 if (likely(poll->head)) {
1834 spin_lock(&poll->head->lock);
1835 if (unlikely(list_empty(&poll->wait.entry))) {
1836 if (ipt.error)
1837 cancel = true;
1838 ipt.error = 0;
1839 mask = 0;
1840 }
1841 if (mask || ipt.error)
1842 list_del_init(&poll->wait.entry);
1843 else if (cancel)
1844 WRITE_ONCE(poll->canceled, true);
1845 else if (!poll->done) /* actually waiting for an event */
1846 list_add_tail(&req->list, &ctx->cancel_list);
1847 spin_unlock(&poll->head->lock);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001848 }
Jens Axboe8c838782019-03-12 15:48:16 -06001849 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06001850 ipt.error = 0;
1851 io_poll_complete(ctx, req, mask);
1852 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07001853 spin_unlock_irq(&ctx->completion_lock);
1854
Jens Axboe8c838782019-03-12 15:48:16 -06001855 if (mask) {
1856 io_cqring_ev_posted(ctx);
Jens Axboee65ef562019-03-12 10:16:44 -06001857 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001858 }
Jens Axboe8c838782019-03-12 15:48:16 -06001859 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001860}
1861
Jens Axboe5262f562019-09-17 12:26:57 -06001862static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1863{
1864 struct io_ring_ctx *ctx;
1865 struct io_kiocb *req;
1866 unsigned long flags;
1867
1868 req = container_of(timer, struct io_kiocb, timeout.timer);
1869 ctx = req->ctx;
1870 atomic_inc(&ctx->cq_timeouts);
1871
1872 spin_lock_irqsave(&ctx->completion_lock, flags);
1873 list_del(&req->list);
1874
1875 io_cqring_fill_event(ctx, req->user_data, -ETIME);
1876 io_commit_cqring(ctx);
1877 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1878
1879 io_cqring_ev_posted(ctx);
1880
1881 io_put_req(req);
1882 return HRTIMER_NORESTART;
1883}
1884
1885static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1886{
yangerkun5da0fb12019-10-15 21:59:29 +08001887 unsigned count;
Jens Axboe5262f562019-09-17 12:26:57 -06001888 struct io_ring_ctx *ctx = req->ctx;
1889 struct list_head *entry;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06001890 struct timespec64 ts;
Jens Axboe5262f562019-09-17 12:26:57 -06001891
1892 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1893 return -EINVAL;
1894 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
1895 sqe->len != 1)
1896 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06001897
1898 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06001899 return -EFAULT;
1900
1901 /*
1902	 * sqe->off holds how many completion events need to occur before
1903	 * this timeout event is satisfied.
1904 */
1905 count = READ_ONCE(sqe->off);
1906 if (!count)
1907 count = 1;
1908
1909 req->sequence = ctx->cached_sq_head + count - 1;
yangerkun5da0fb12019-10-15 21:59:29 +08001910 /* reuse it to store the count */
1911 req->submit.sequence = count;
Jens Axboe5262f562019-09-17 12:26:57 -06001912 req->flags |= REQ_F_TIMEOUT;
1913
1914 /*
1915 * Insertion sort, ensuring the first entry in the list is always
1916 * the one we need first.
1917 */
Jens Axboe5262f562019-09-17 12:26:57 -06001918 spin_lock_irq(&ctx->completion_lock);
1919 list_for_each_prev(entry, &ctx->timeout_list) {
1920 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
yangerkun5da0fb12019-10-15 21:59:29 +08001921 unsigned nxt_sq_head;
1922 long long tmp, tmp_nxt;
Jens Axboe5262f562019-09-17 12:26:57 -06001923
yangerkun5da0fb12019-10-15 21:59:29 +08001924 /*
1925 * Since cached_sq_head + count - 1 can overflow, use type long
1926 * long to store it.
1927 */
1928 tmp = (long long)ctx->cached_sq_head + count - 1;
1929 nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
1930 tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
1931
1932 /*
1933		 * cached_sq_head may overflow, but it can never wrap around twice
1934		 * while a valid timeout request is still pending.
1935 */
1936 if (ctx->cached_sq_head < nxt_sq_head)
1937 tmp_nxt += UINT_MAX;
1938
1939 if (tmp >= tmp_nxt)
Jens Axboe5262f562019-09-17 12:26:57 -06001940 break;
1941 }
1942 list_add(&req->list, entry);
1943 spin_unlock_irq(&ctx->completion_lock);
1944
1945 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1946 req->timeout.timer.function = io_timeout_fn;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06001947 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
Jens Axboe5262f562019-09-17 12:26:57 -06001948 HRTIMER_MODE_REL);
1949 return 0;
1950}
1951
Jens Axboede0617e2019-04-06 21:51:27 -06001952static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
1953 const struct io_uring_sqe *sqe)
1954{
1955 struct io_uring_sqe *sqe_copy;
1956
1957 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
1958 return 0;
1959
1960 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
1961 if (!sqe_copy)
1962 return -EAGAIN;
1963
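	/* recheck under the lock in case the defer list drained meanwhile */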
1964 spin_lock_irq(&ctx->completion_lock);
1965 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
1966 spin_unlock_irq(&ctx->completion_lock);
1967 kfree(sqe_copy);
1968 return 0;
1969 }
1970
1971 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
1972 req->submit.sqe = sqe_copy;
1973
1974 INIT_WORK(&req->work, io_sq_wq_submit_work);
1975 list_add_tail(&req->list, &ctx->defer_list);
1976 spin_unlock_irq(&ctx->completion_lock);
1977 return -EIOCBQUEUED;
1978}
1979
Jens Axboe2b188cc2019-01-07 10:46:33 -07001980static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001981 const struct sqe_submit *s, bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001982{
Jens Axboee0c5c572019-03-12 10:18:47 -06001983 int ret, opcode;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001984
Jens Axboe9e645e112019-05-10 16:07:28 -06001985 req->user_data = READ_ONCE(s->sqe->user_data);
1986
Jens Axboe2b188cc2019-01-07 10:46:33 -07001987 if (unlikely(s->index >= ctx->sq_entries))
1988 return -EINVAL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001989
1990 opcode = READ_ONCE(s->sqe->opcode);
1991 switch (opcode) {
1992 case IORING_OP_NOP:
1993 ret = io_nop(req, req->user_data);
1994 break;
1995 case IORING_OP_READV:
Jens Axboeedafcce2019-01-09 09:16:05 -07001996 if (unlikely(s->sqe->buf_index))
1997 return -EINVAL;
Jens Axboe8358e3a2019-04-23 08:17:58 -06001998 ret = io_read(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001999 break;
2000 case IORING_OP_WRITEV:
Jens Axboeedafcce2019-01-09 09:16:05 -07002001 if (unlikely(s->sqe->buf_index))
2002 return -EINVAL;
Jens Axboe8358e3a2019-04-23 08:17:58 -06002003 ret = io_write(req, s, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07002004 break;
2005 case IORING_OP_READ_FIXED:
Jens Axboe8358e3a2019-04-23 08:17:58 -06002006 ret = io_read(req, s, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07002007 break;
2008 case IORING_OP_WRITE_FIXED:
Jens Axboe8358e3a2019-04-23 08:17:58 -06002009 ret = io_write(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002010 break;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002011 case IORING_OP_FSYNC:
2012 ret = io_fsync(req, s->sqe, force_nonblock);
2013 break;
Jens Axboe221c5eb2019-01-17 09:41:58 -07002014 case IORING_OP_POLL_ADD:
2015 ret = io_poll_add(req, s->sqe);
2016 break;
2017 case IORING_OP_POLL_REMOVE:
2018 ret = io_poll_remove(req, s->sqe);
2019 break;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002020 case IORING_OP_SYNC_FILE_RANGE:
2021 ret = io_sync_file_range(req, s->sqe, force_nonblock);
2022 break;
Jens Axboe0fa03c62019-04-19 13:34:07 -06002023 case IORING_OP_SENDMSG:
2024 ret = io_sendmsg(req, s->sqe, force_nonblock);
2025 break;
Jens Axboeaa1fa282019-04-19 13:38:09 -06002026 case IORING_OP_RECVMSG:
2027 ret = io_recvmsg(req, s->sqe, force_nonblock);
2028 break;
Jens Axboe5262f562019-09-17 12:26:57 -06002029 case IORING_OP_TIMEOUT:
2030 ret = io_timeout(req, s->sqe);
2031 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002032 default:
2033 ret = -EINVAL;
2034 break;
2035 }
2036
Jens Axboedef596e2019-01-09 08:59:42 -07002037 if (ret)
2038 return ret;
2039
2040 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002041 if (req->result == -EAGAIN)
Jens Axboedef596e2019-01-09 08:59:42 -07002042 return -EAGAIN;
2043
2044 /* workqueue context doesn't hold uring_lock, grab it now */
2045 if (s->needs_lock)
2046 mutex_lock(&ctx->uring_lock);
2047 io_iopoll_req_issued(req);
2048 if (s->needs_lock)
2049 mutex_unlock(&ctx->uring_lock);
2050 }
2051
2052 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002053}
2054
Jens Axboe31b51512019-01-18 22:56:34 -07002055static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
2056 const struct io_uring_sqe *sqe)
2057{
2058 switch (sqe->opcode) {
2059 case IORING_OP_READV:
2060 case IORING_OP_READ_FIXED:
2061 return &ctx->pending_async[READ];
2062 case IORING_OP_WRITEV:
2063 case IORING_OP_WRITE_FIXED:
2064 return &ctx->pending_async[WRITE];
2065 default:
2066 return NULL;
2067 }
2068}
2069
Jens Axboeedafcce2019-01-09 09:16:05 -07002070static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
2071{
2072 u8 opcode = READ_ONCE(sqe->opcode);
2073
2074 return !(opcode == IORING_OP_READ_FIXED ||
2075 opcode == IORING_OP_WRITE_FIXED);
2076}
2077
Jens Axboe2b188cc2019-01-07 10:46:33 -07002078static void io_sq_wq_submit_work(struct work_struct *work)
2079{
2080 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002081 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe31b51512019-01-18 22:56:34 -07002082 struct mm_struct *cur_mm = NULL;
2083 struct async_list *async_list;
2084 LIST_HEAD(req_list);
Jens Axboeedafcce2019-01-09 09:16:05 -07002085 mm_segment_t old_fs;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002086 int ret;
2087
Jens Axboe31b51512019-01-18 22:56:34 -07002088 async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
2089restart:
2090 do {
2091 struct sqe_submit *s = &req->submit;
2092 const struct io_uring_sqe *sqe = s->sqe;
Jackie Liud0ee8792019-07-31 14:39:33 +08002093 unsigned int flags = req->flags;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002094
Stefan Bühler8449eed2019-04-27 20:34:19 +02002095 /* Ensure we clear previously set non-block flag */
Jens Axboe31b51512019-01-18 22:56:34 -07002096 req->rw.ki_flags &= ~IOCB_NOWAIT;
2097
2098 ret = 0;
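		/*
		 * Requests that touch user memory need the submitter's mm:
		 * grab a reference and switch to it (and to USER_DS) for the
		 * rest of this worker run.
		 */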
2099 if (io_sqe_needs_user(sqe) && !cur_mm) {
2100 if (!mmget_not_zero(ctx->sqo_mm)) {
2101 ret = -EFAULT;
2102 } else {
2103 cur_mm = ctx->sqo_mm;
2104 use_mm(cur_mm);
2105 old_fs = get_fs();
2106 set_fs(USER_DS);
2107 }
2108 }
2109
2110 if (!ret) {
2111 s->has_user = cur_mm != NULL;
2112 s->needs_lock = true;
2113 do {
Jens Axboe8358e3a2019-04-23 08:17:58 -06002114 ret = __io_submit_sqe(ctx, req, s, false);
Jens Axboe31b51512019-01-18 22:56:34 -07002115 /*
2116 * We can get EAGAIN for polled IO even though
2117 * we're forcing a sync submission from here,
2118 * since we can't wait for request slots on the
2119 * block side.
2120 */
2121 if (ret != -EAGAIN)
2122 break;
2123 cond_resched();
2124 } while (1);
2125 }
Jens Axboe817869d2019-04-30 14:44:05 -06002126
2127 /* drop submission reference */
2128 io_put_req(req);
2129
Jens Axboe31b51512019-01-18 22:56:34 -07002130 if (ret) {
Jens Axboec71ffb62019-05-13 20:58:29 -06002131 io_cqring_add_event(ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06002132 io_put_req(req);
Jens Axboe31b51512019-01-18 22:56:34 -07002133 }
2134
2135 /* async context always use a copy of the sqe */
2136 kfree(sqe);
2137
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08002138		/* reqs from the defer and link lists must not decrement the async cnt */
Jackie Liud0ee8792019-07-31 14:39:33 +08002139 if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08002140 goto out;
2141
Jens Axboe31b51512019-01-18 22:56:34 -07002142 if (!async_list)
2143 break;
2144 if (!list_empty(&req_list)) {
2145 req = list_first_entry(&req_list, struct io_kiocb,
2146 list);
2147 list_del(&req->list);
2148 continue;
2149 }
2150 if (list_empty(&async_list->list))
2151 break;
2152
2153 req = NULL;
2154 spin_lock(&async_list->lock);
2155 if (list_empty(&async_list->list)) {
2156 spin_unlock(&async_list->lock);
2157 break;
2158 }
2159 list_splice_init(&async_list->list, &req_list);
2160 spin_unlock(&async_list->lock);
2161
2162 req = list_first_entry(&req_list, struct io_kiocb, list);
2163 list_del(&req->list);
2164 } while (req);
Jens Axboeedafcce2019-01-09 09:16:05 -07002165
2166 /*
Jens Axboe31b51512019-01-18 22:56:34 -07002167 * Rare case of racing with a submitter. If we find the count has
2168 * dropped to zero AND we have pending work items, then restart
2169 * the processing. This is a tiny race window.
Jens Axboeedafcce2019-01-09 09:16:05 -07002170 */
Jens Axboe31b51512019-01-18 22:56:34 -07002171 if (async_list) {
2172 ret = atomic_dec_return(&async_list->cnt);
2173 while (!ret && !list_empty(&async_list->list)) {
2174 spin_lock(&async_list->lock);
2175 atomic_inc(&async_list->cnt);
2176 list_splice_init(&async_list->list, &req_list);
2177 spin_unlock(&async_list->lock);
2178
2179 if (!list_empty(&req_list)) {
2180 req = list_first_entry(&req_list,
2181 struct io_kiocb, list);
2182 list_del(&req->list);
2183 goto restart;
2184 }
2185 ret = atomic_dec_return(&async_list->cnt);
Jens Axboeedafcce2019-01-09 09:16:05 -07002186 }
Jens Axboeedafcce2019-01-09 09:16:05 -07002187 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002188
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08002189out:
Jens Axboe31b51512019-01-18 22:56:34 -07002190 if (cur_mm) {
Jens Axboeedafcce2019-01-09 09:16:05 -07002191 set_fs(old_fs);
Jens Axboe31b51512019-01-18 22:56:34 -07002192 unuse_mm(cur_mm);
2193 mmput(cur_mm);
Jens Axboeedafcce2019-01-09 09:16:05 -07002194 }
Jens Axboe31b51512019-01-18 22:56:34 -07002195}
Jens Axboe2b188cc2019-01-07 10:46:33 -07002196
Jens Axboe31b51512019-01-18 22:56:34 -07002197/*
2198 * See if we can piggy back onto previously submitted work, that is still
2199 * running. We currently only allow this if the new request is sequential
2200 * to the previous one we punted.
2201 */
2202static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
2203{
Jens Axboe6d5d5ac2019-09-11 10:16:13 -06002204 bool ret;
Jens Axboe31b51512019-01-18 22:56:34 -07002205
2206 if (!list)
2207 return false;
2208 if (!(req->flags & REQ_F_SEQ_PREV))
2209 return false;
2210 if (!atomic_read(&list->cnt))
2211 return false;
2212
2213 ret = true;
2214 spin_lock(&list->lock);
2215 list_add_tail(&req->list, &list->list);
Zhengyuan Liuc0e48f92019-07-18 20:44:00 +08002216 /*
2217 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
2218 */
2219 smp_mb();
Jens Axboe31b51512019-01-18 22:56:34 -07002220 if (!atomic_read(&list->cnt)) {
2221 list_del_init(&req->list);
2222 ret = false;
2223 }
2224 spin_unlock(&list->lock);
2225 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002226}
2227
Jens Axboe09bb8392019-03-13 12:39:28 -06002228static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2229{
2230 int op = READ_ONCE(sqe->opcode);
2231
2232 switch (op) {
2233 case IORING_OP_NOP:
2234 case IORING_OP_POLL_REMOVE:
2235 return false;
2236 default:
2237 return true;
2238 }
2239}
2240
2241static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
2242 struct io_submit_state *state, struct io_kiocb *req)
2243{
2244 unsigned flags;
2245 int fd;
2246
2247 flags = READ_ONCE(s->sqe->flags);
2248 fd = READ_ONCE(s->sqe->fd);
2249
Jackie Liu4fe2c962019-09-09 20:50:40 +08002250 if (flags & IOSQE_IO_DRAIN)
Jens Axboede0617e2019-04-06 21:51:27 -06002251 req->flags |= REQ_F_IO_DRAIN;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002252 /*
2253	 * All requests need to record the previous position; when LINK is
2254	 * used with DRAIN, it marks the position of the first IO in the
2255	 * link list.
2256 */
2257 req->sequence = s->sequence;
Jens Axboede0617e2019-04-06 21:51:27 -06002258
Jens Axboe60c112b2019-06-21 10:20:18 -06002259 if (!io_op_needs_file(s->sqe))
Jens Axboe09bb8392019-03-13 12:39:28 -06002260 return 0;
Jens Axboe09bb8392019-03-13 12:39:28 -06002261
2262 if (flags & IOSQE_FIXED_FILE) {
2263 if (unlikely(!ctx->user_files ||
2264 (unsigned) fd >= ctx->nr_user_files))
2265 return -EBADF;
2266 req->file = ctx->user_files[fd];
2267 req->flags |= REQ_F_FIXED_FILE;
2268 } else {
2269 if (s->needs_fixed_file)
2270 return -EBADF;
2271 req->file = io_file_get(state, fd);
2272 if (unlikely(!req->file))
2273 return -EBADF;
2274 }
2275
2276 return 0;
2277}
2278
Jackie Liu4fe2c962019-09-09 20:50:40 +08002279static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboec57666682019-09-09 16:19:45 -06002280 struct sqe_submit *s, bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002281{
Jens Axboee0c5c572019-03-12 10:18:47 -06002282 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002283
Jens Axboec57666682019-09-09 16:19:45 -06002284 ret = __io_submit_sqe(ctx, req, s, force_nonblock);
Stefan Bühler8449eed2019-04-27 20:34:19 +02002285 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07002286 struct io_uring_sqe *sqe_copy;
2287
Jackie Liu954dab12019-09-18 10:37:52 +08002288 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002289 if (sqe_copy) {
Jens Axboe31b51512019-01-18 22:56:34 -07002290 struct async_list *list;
2291
Jens Axboe2b188cc2019-01-07 10:46:33 -07002292 s->sqe = sqe_copy;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002293 memcpy(&req->submit, s, sizeof(*s));
Jens Axboe31b51512019-01-18 22:56:34 -07002294 list = io_async_list_from_sqe(ctx, s->sqe);
2295 if (!io_add_to_prev_work(list, req)) {
2296 if (list)
2297 atomic_inc(&list->cnt);
2298 INIT_WORK(&req->work, io_sq_wq_submit_work);
Jens Axboe18d9be12019-09-10 09:13:05 -06002299 io_queue_async_work(ctx, req);
Jens Axboe31b51512019-01-18 22:56:34 -07002300 }
Jens Axboee65ef562019-03-12 10:16:44 -06002301
2302 /*
2303 * Queued up for async execution, worker will release
Jens Axboe9e645e112019-05-10 16:07:28 -06002304 * submit reference when the iocb is actually submitted.
Jens Axboee65ef562019-03-12 10:16:44 -06002305 */
2306 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002307 }
2308 }
Jens Axboee65ef562019-03-12 10:16:44 -06002309
2310 /* drop submission reference */
2311 io_put_req(req);
2312
2313 /* and drop final reference, if we failed */
Jens Axboe9e645e112019-05-10 16:07:28 -06002314 if (ret) {
2315 io_cqring_add_event(ctx, req->user_data, ret);
2316 if (req->flags & REQ_F_LINK)
2317 req->flags |= REQ_F_FAIL_LINK;
Jens Axboee65ef562019-03-12 10:16:44 -06002318 io_put_req(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002319 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002320
2321 return ret;
2322}
2323
Jackie Liu4fe2c962019-09-09 20:50:40 +08002324static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboec57666682019-09-09 16:19:45 -06002325 struct sqe_submit *s, bool force_nonblock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08002326{
2327 int ret;
2328
2329 ret = io_req_defer(ctx, req, s->sqe);
2330 if (ret) {
2331 if (ret != -EIOCBQUEUED) {
2332 io_free_req(req);
2333 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2334 }
2335 return 0;
2336 }
2337
Jens Axboec57666682019-09-09 16:19:45 -06002338 return __io_queue_sqe(ctx, req, s, force_nonblock);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002339}
2340
2341static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboec57666682019-09-09 16:19:45 -06002342 struct sqe_submit *s, struct io_kiocb *shadow,
2343 bool force_nonblock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08002344{
2345 int ret;
2346 int need_submit = false;
2347
2348 if (!shadow)
Jens Axboec57666682019-09-09 16:19:45 -06002349 return io_queue_sqe(ctx, req, s, force_nonblock);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002350
2351 /*
2352	 * Mark the first IO in the link list as DRAIN and let all the
2353	 * following IOs enter the defer list. All prior IO must complete
2354	 * before the link list is run.
2355 */
2356 req->flags |= REQ_F_IO_DRAIN;
2357 ret = io_req_defer(ctx, req, s->sqe);
2358 if (ret) {
2359 if (ret != -EIOCBQUEUED) {
2360 io_free_req(req);
2361 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2362 return 0;
2363 }
2364 } else {
2365 /*
2366		 * ret == 0 means that all IOs in front of the link IO have
2367		 * completed, so queue the link head now.
2368 */
2369 need_submit = true;
2370 }
2371
2372 /* Insert shadow req to defer_list, blocking next IOs */
2373 spin_lock_irq(&ctx->completion_lock);
2374 list_add_tail(&shadow->list, &ctx->defer_list);
2375 spin_unlock_irq(&ctx->completion_lock);
2376
2377 if (need_submit)
Jens Axboec57666682019-09-09 16:19:45 -06002378 return __io_queue_sqe(ctx, req, s, force_nonblock);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002379
2380 return 0;
2381}
2382
Jens Axboe9e645e112019-05-10 16:07:28 -06002383#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2384
2385static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
Jens Axboec57666682019-09-09 16:19:45 -06002386 struct io_submit_state *state, struct io_kiocb **link,
2387 bool force_nonblock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002388{
2389 struct io_uring_sqe *sqe_copy;
2390 struct io_kiocb *req;
2391 int ret;
2392
2393 /* enforce forwards compatibility on users */
2394 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2395 ret = -EINVAL;
2396 goto err;
2397 }
2398
2399 req = io_get_req(ctx, state);
2400 if (unlikely(!req)) {
2401 ret = -EAGAIN;
2402 goto err;
2403 }
2404
2405 ret = io_req_set_file(ctx, s, state, req);
2406 if (unlikely(ret)) {
2407err_req:
2408 io_free_req(req);
2409err:
2410 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2411 return;
2412 }
2413
Jens Axboe9e645e112019-05-10 16:07:28 -06002414 /*
2415 * If we already have a head request, queue this one for async
2416 * submittal once the head completes. If we don't have a head but
2417 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2418 * submitted sync once the chain is complete. If none of those
2419 * conditions are true (normal request), then just queue it.
2420 */
2421 if (*link) {
2422 struct io_kiocb *prev = *link;
2423
2424 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2425 if (!sqe_copy) {
2426 ret = -EAGAIN;
2427 goto err_req;
2428 }
2429
2430 s->sqe = sqe_copy;
2431 memcpy(&req->submit, s, sizeof(*s));
2432 list_add_tail(&req->list, &prev->link_list);
2433 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2434 req->flags |= REQ_F_LINK;
2435
2436 memcpy(&req->submit, s, sizeof(*s));
2437 INIT_LIST_HEAD(&req->link_list);
2438 *link = req;
2439 } else {
Jens Axboec57666682019-09-09 16:19:45 -06002440 io_queue_sqe(ctx, req, s, force_nonblock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002441 }
2442}
2443
Jens Axboe9a56a232019-01-09 09:06:50 -07002444/*
2445 * Batched submission is done, ensure local IO is flushed out.
2446 */
2447static void io_submit_state_end(struct io_submit_state *state)
2448{
2449 blk_finish_plug(&state->plug);
Jens Axboe3d6770f2019-04-13 11:50:54 -06002450 io_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07002451 if (state->free_reqs)
2452 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2453 &state->reqs[state->cur_req]);
Jens Axboe9a56a232019-01-09 09:06:50 -07002454}
2455
2456/*
2457 * Start submission side cache.
2458 */
2459static void io_submit_state_start(struct io_submit_state *state,
2460 struct io_ring_ctx *ctx, unsigned max_ios)
2461{
2462 blk_start_plug(&state->plug);
Jens Axboe2579f912019-01-09 09:10:43 -07002463 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07002464 state->file = NULL;
2465 state->ios_left = max_ios;
2466}
2467
Jens Axboe2b188cc2019-01-07 10:46:33 -07002468static void io_commit_sqring(struct io_ring_ctx *ctx)
2469{
Hristo Venev75b28af2019-08-26 17:23:46 +00002470 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002471
Hristo Venev75b28af2019-08-26 17:23:46 +00002472 if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07002473 /*
2474 * Ensure any loads from the SQEs are done at this point,
2475 * since once we write the new head, the application could
2476 * write new data to them.
2477 */
Hristo Venev75b28af2019-08-26 17:23:46 +00002478 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002479 }
2480}
2481
2482/*
Jens Axboe2b188cc2019-01-07 10:46:33 -07002483 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2484 * that is mapped by userspace. This means that care needs to be taken to
2485 * ensure that reads are stable, as we cannot rely on userspace always
2486 * being a good citizen. If members of the sqe are validated and then later
2487 * used, it's important that those reads are done through READ_ONCE() to
2488 * prevent a re-load down the line.
2489 */
2490static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2491{
Hristo Venev75b28af2019-08-26 17:23:46 +00002492 struct io_rings *rings = ctx->rings;
2493 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002494 unsigned head;
2495
2496 /*
2497 * The cached sq head (or cq tail) serves two purposes:
2498 *
2499 * 1) allows us to batch the cost of updating the user visible
2500	 *    head.
2501 * 2) allows the kernel side to track the head on its own, even
2502 * though the application is the one updating it.
2503 */
2504 head = ctx->cached_sq_head;
Stefan Bühlere523a292019-04-19 11:57:44 +02002505 /* make sure SQ entry isn't read before tail */
Hristo Venev75b28af2019-08-26 17:23:46 +00002506 if (head == smp_load_acquire(&rings->sq.tail))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002507 return false;
2508
Hristo Venev75b28af2019-08-26 17:23:46 +00002509 head = READ_ONCE(sq_array[head & ctx->sq_mask]);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002510 if (head < ctx->sq_entries) {
2511 s->index = head;
2512 s->sqe = &ctx->sq_sqes[head];
Jackie Liu8776f3f2019-09-09 20:50:39 +08002513 s->sequence = ctx->cached_sq_head;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002514 ctx->cached_sq_head++;
2515 return true;
2516 }
2517
2518 /* drop invalid entries */
2519 ctx->cached_sq_head++;
Hristo Venev75b28af2019-08-26 17:23:46 +00002520 rings->sq_dropped++;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002521 return false;
2522}
2523
Jens Axboe6c271ce2019-01-10 11:22:30 -07002524static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
2525 unsigned int nr, bool has_user, bool mm_fault)
2526{
2527 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002528 struct io_kiocb *link = NULL;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002529 struct io_kiocb *shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002530 bool prev_was_link = false;
2531 int i, submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002532
2533 if (nr > IO_PLUG_THRESHOLD) {
2534 io_submit_state_start(&state, ctx, nr);
2535 statep = &state;
2536 }
2537
2538 for (i = 0; i < nr; i++) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002539 /*
2540 * If previous wasn't linked and we have a linked command,
2541 * that's the end of the chain. Submit the previous link.
2542 */
2543 if (!prev_was_link && link) {
Jens Axboec57666682019-09-09 16:19:45 -06002544 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2545 true);
Jens Axboe9e645e112019-05-10 16:07:28 -06002546 link = NULL;
Jackie Liu5f5ad9c2019-09-18 10:37:53 +08002547 shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002548 }
2549 prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
2550
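		/*
		 * A link whose head also has IOSQE_IO_DRAIN set gets a shadow
		 * request placed on the defer list, holding back later IOs
		 * until the whole link has completed.
		 */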
Jackie Liu4fe2c962019-09-09 20:50:40 +08002551 if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
2552 if (!shadow_req) {
2553 shadow_req = io_get_req(ctx, NULL);
Jackie Liua1041c22019-09-18 17:25:52 +08002554 if (unlikely(!shadow_req))
2555 goto out;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002556 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2557 refcount_dec(&shadow_req->refs);
2558 }
2559 shadow_req->sequence = sqes[i].sequence;
2560 }
2561
Jackie Liua1041c22019-09-18 17:25:52 +08002562out:
Jens Axboe6c271ce2019-01-10 11:22:30 -07002563 if (unlikely(mm_fault)) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002564 io_cqring_add_event(ctx, sqes[i].sqe->user_data,
2565 -EFAULT);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002566 } else {
2567 sqes[i].has_user = has_user;
2568 sqes[i].needs_lock = true;
2569 sqes[i].needs_fixed_file = true;
Jens Axboec57666682019-09-09 16:19:45 -06002570 io_submit_sqe(ctx, &sqes[i], statep, &link, true);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002571 submitted++;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002572 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07002573 }
2574
Jens Axboe9e645e112019-05-10 16:07:28 -06002575 if (link)
Jens Axboec57666682019-09-09 16:19:45 -06002576 io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002577 if (statep)
2578 io_submit_state_end(&state);
2579
2580 return submitted;
2581}
2582
2583static int io_sq_thread(void *data)
2584{
2585 struct sqe_submit sqes[IO_IOPOLL_BATCH];
2586 struct io_ring_ctx *ctx = data;
2587 struct mm_struct *cur_mm = NULL;
2588 mm_segment_t old_fs;
2589 DEFINE_WAIT(wait);
2590 unsigned inflight;
2591 unsigned long timeout;
2592
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002593 complete(&ctx->sqo_thread_started);
2594
Jens Axboe6c271ce2019-01-10 11:22:30 -07002595 old_fs = get_fs();
2596 set_fs(USER_DS);
2597
2598 timeout = inflight = 0;
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002599 while (!kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002600 bool all_fixed, mm_fault = false;
2601 int i;
2602
2603 if (inflight) {
2604 unsigned nr_events = 0;
2605
2606 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002607 io_iopoll_check(ctx, &nr_events, 0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002608 } else {
2609 /*
2610 * Normal IO, just pretend everything completed.
2611 * We don't have to poll completions for that.
2612 */
2613 nr_events = inflight;
2614 }
2615
2616 inflight -= nr_events;
2617 if (!inflight)
2618 timeout = jiffies + ctx->sq_thread_idle;
2619 }
2620
2621 if (!io_get_sqring(ctx, &sqes[0])) {
2622 /*
2623 * We're polling. If we're within the defined idle
2624 * period, then let us spin without work before going
2625 * to sleep.
2626 */
2627 if (inflight || !time_after(jiffies, timeout)) {
Jens Axboe9831a902019-09-19 09:48:55 -06002628 cond_resched();
Jens Axboe6c271ce2019-01-10 11:22:30 -07002629 continue;
2630 }
2631
2632 /*
2633 * Drop cur_mm before scheduling, we can't hold it for
2634 * long periods (or over schedule()). Do this before
2635 * adding ourselves to the waitqueue, as the unuse/drop
2636 * may sleep.
2637 */
2638 if (cur_mm) {
2639 unuse_mm(cur_mm);
2640 mmput(cur_mm);
2641 cur_mm = NULL;
2642 }
2643
2644 prepare_to_wait(&ctx->sqo_wait, &wait,
2645 TASK_INTERRUPTIBLE);
2646
2647 /* Tell userspace we may need a wakeup call */
Hristo Venev75b28af2019-08-26 17:23:46 +00002648 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
Stefan Bühler0d7bae62019-04-19 11:57:45 +02002649 /* make sure to read SQ tail after writing flags */
2650 smp_mb();
Jens Axboe6c271ce2019-01-10 11:22:30 -07002651
2652 if (!io_get_sqring(ctx, &sqes[0])) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002653 if (kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002654 finish_wait(&ctx->sqo_wait, &wait);
2655 break;
2656 }
2657 if (signal_pending(current))
2658 flush_signals(current);
2659 schedule();
2660 finish_wait(&ctx->sqo_wait, &wait);
2661
Hristo Venev75b28af2019-08-26 17:23:46 +00002662 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002663 continue;
2664 }
2665 finish_wait(&ctx->sqo_wait, &wait);
2666
Hristo Venev75b28af2019-08-26 17:23:46 +00002667 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002668 }
2669
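		/*
		 * Pull in as many SQEs as we can, up to the batch size, and
		 * note whether any of them need user memory access so we know
		 * if the mm must be grabbed below.
		 */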
2670 i = 0;
2671 all_fixed = true;
2672 do {
2673 if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
2674 all_fixed = false;
2675
2676 i++;
2677 if (i == ARRAY_SIZE(sqes))
2678 break;
2679 } while (io_get_sqring(ctx, &sqes[i]));
2680
2681 /* Unless all new commands are FIXED regions, grab mm */
2682 if (!all_fixed && !cur_mm) {
2683 mm_fault = !mmget_not_zero(ctx->sqo_mm);
2684 if (!mm_fault) {
2685 use_mm(ctx->sqo_mm);
2686 cur_mm = ctx->sqo_mm;
2687 }
2688 }
2689
2690 inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
2691 mm_fault);
2692
2693 /* Commit SQ ring head once we've consumed all SQEs */
2694 io_commit_sqring(ctx);
2695 }
2696
2697 set_fs(old_fs);
2698 if (cur_mm) {
2699 unuse_mm(cur_mm);
2700 mmput(cur_mm);
2701 }
Jens Axboe06058632019-04-13 09:26:03 -06002702
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002703 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06002704
Jens Axboe6c271ce2019-01-10 11:22:30 -07002705 return 0;
2706}
2707
Jens Axboec57666682019-09-09 16:19:45 -06002708static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
2709 bool block_for_last)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002710{
Jens Axboe9a56a232019-01-09 09:06:50 -07002711 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002712 struct io_kiocb *link = NULL;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002713 struct io_kiocb *shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002714 bool prev_was_link = false;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002715 int i, submit = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002716
Jens Axboe9a56a232019-01-09 09:06:50 -07002717 if (to_submit > IO_PLUG_THRESHOLD) {
2718 io_submit_state_start(&state, ctx, to_submit);
2719 statep = &state;
2720 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002721
2722 for (i = 0; i < to_submit; i++) {
Jens Axboec57666682019-09-09 16:19:45 -06002723 bool force_nonblock = true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002724 struct sqe_submit s;
2725
2726 if (!io_get_sqring(ctx, &s))
2727 break;
2728
Jens Axboe9e645e112019-05-10 16:07:28 -06002729 /*
2730 * If previous wasn't linked and we have a linked command,
2731 * that's the end of the chain. Submit the previous link.
2732 */
2733 if (!prev_was_link && link) {
Jens Axboec57666682019-09-09 16:19:45 -06002734 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2735 force_nonblock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002736 link = NULL;
Jackie Liu5f5ad9c2019-09-18 10:37:53 +08002737 shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002738 }
2739 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2740
Jackie Liu4fe2c962019-09-09 20:50:40 +08002741 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2742 if (!shadow_req) {
2743 shadow_req = io_get_req(ctx, NULL);
Jackie Liua1041c22019-09-18 17:25:52 +08002744 if (unlikely(!shadow_req))
2745 goto out;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002746 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2747 refcount_dec(&shadow_req->refs);
2748 }
2749 shadow_req->sequence = s.sequence;
2750 }
2751
Jackie Liua1041c22019-09-18 17:25:52 +08002752out:
Jens Axboe2b188cc2019-01-07 10:46:33 -07002753 s.has_user = true;
Jens Axboedef596e2019-01-09 08:59:42 -07002754 s.needs_lock = false;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002755 s.needs_fixed_file = false;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002756 submit++;
Jens Axboec57666682019-09-09 16:19:45 -06002757
2758 /*
2759		 * The caller will block for events after submit, so let the
2760		 * last IO block inline instead of forcing it non-blocking.
2761		 * This is either the only IO it's submitting, or it already
2762		 * submitted the previous ones. This improves performance by
2763		 * avoiding an async punt that we don't need to do.
2764 */
2765 if (block_for_last && submit == to_submit)
2766 force_nonblock = false;
2767
2768 io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002769 }
2770 io_commit_sqring(ctx);
2771
Jens Axboe9e645e112019-05-10 16:07:28 -06002772 if (link)
Jens Axboec57666682019-09-09 16:19:45 -06002773 io_queue_link_head(ctx, link, &link->submit, shadow_req,
Pavel Begunkovbf7ec932019-10-04 17:01:08 +03002774 !block_for_last);
Jens Axboe9a56a232019-01-09 09:06:50 -07002775 if (statep)
2776 io_submit_state_end(statep);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002777
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002778 return submit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002779}
2780
Jens Axboe2b188cc2019-01-07 10:46:33 -07002781/*
2782 * Wait until events become available, if we don't already have some. The
2783 * application must reap them itself, as they reside on the shared cq ring.
2784 */
2785static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2786 const sigset_t __user *sig, size_t sigsz)
2787{
Hristo Venev75b28af2019-08-26 17:23:46 +00002788 struct io_rings *rings = ctx->rings;
Jens Axboe5262f562019-09-17 12:26:57 -06002789 unsigned nr_timeouts;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002790 int ret;
2791
Hristo Venev75b28af2019-08-26 17:23:46 +00002792 if (io_cqring_events(rings) >= min_events)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002793 return 0;
2794
2795 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01002796#ifdef CONFIG_COMPAT
2797 if (in_compat_syscall())
2798 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07002799 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01002800 else
2801#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07002802 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01002803
Jens Axboe2b188cc2019-01-07 10:46:33 -07002804 if (ret)
2805 return ret;
2806 }
2807
Jens Axboe5262f562019-09-17 12:26:57 -06002808 nr_timeouts = atomic_read(&ctx->cq_timeouts);
2809 /*
2810	 * Return if we have enough events, or if a timeout occurred since
2811 * we started waiting. For timeouts, we always want to return to
2812 * userspace.
2813 */
2814 ret = wait_event_interruptible(ctx->wait,
2815 io_cqring_events(rings) >= min_events ||
2816 atomic_read(&ctx->cq_timeouts) != nr_timeouts);
Oleg Nesterovb7724342019-07-16 16:29:53 -07002817 restore_saved_sigmask_unless(ret == -ERESTARTSYS);
Oleg Nesterov97abc882019-06-28 12:06:50 -07002818 if (ret == -ERESTARTSYS)
2819 ret = -EINTR;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002820
Hristo Venev75b28af2019-08-26 17:23:46 +00002821 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002822}
2823
Jens Axboe6b063142019-01-10 22:13:58 -07002824static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
2825{
2826#if defined(CONFIG_UNIX)
2827 if (ctx->ring_sock) {
2828 struct sock *sock = ctx->ring_sock->sk;
2829 struct sk_buff *skb;
2830
2831 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
2832 kfree_skb(skb);
2833 }
2834#else
2835 int i;
2836
2837 for (i = 0; i < ctx->nr_user_files; i++)
2838 fput(ctx->user_files[i]);
2839#endif
2840}
2841
2842static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2843{
2844 if (!ctx->user_files)
2845 return -ENXIO;
2846
2847 __io_sqe_files_unregister(ctx);
2848 kfree(ctx->user_files);
2849 ctx->user_files = NULL;
2850 ctx->nr_user_files = 0;
2851 return 0;
2852}
2853
Jens Axboe6c271ce2019-01-10 11:22:30 -07002854static void io_sq_thread_stop(struct io_ring_ctx *ctx)
2855{
2856 if (ctx->sqo_thread) {
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002857 wait_for_completion(&ctx->sqo_thread_started);
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002858 /*
2859 * The park is a bit of a work-around, without it we get
2860 * warning spews on shutdown with SQPOLL set and affinity
2861 * set to a single CPU.
2862 */
Jens Axboe06058632019-04-13 09:26:03 -06002863 kthread_park(ctx->sqo_thread);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002864 kthread_stop(ctx->sqo_thread);
2865 ctx->sqo_thread = NULL;
2866 }
2867}
2868
Jens Axboe6b063142019-01-10 22:13:58 -07002869static void io_finish_async(struct io_ring_ctx *ctx)
2870{
Jens Axboe54a91f32019-09-10 09:15:04 -06002871 int i;
2872
Jens Axboe6c271ce2019-01-10 11:22:30 -07002873 io_sq_thread_stop(ctx);
2874
Jens Axboe54a91f32019-09-10 09:15:04 -06002875 for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
2876 if (ctx->sqo_wq[i]) {
2877 destroy_workqueue(ctx->sqo_wq[i]);
2878 ctx->sqo_wq[i] = NULL;
2879 }
Jens Axboe6b063142019-01-10 22:13:58 -07002880 }
2881}
2882
2883#if defined(CONFIG_UNIX)
2884static void io_destruct_skb(struct sk_buff *skb)
2885{
2886 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
Jens Axboe8a997342019-10-09 14:40:13 -06002887 int i;
Jens Axboe6b063142019-01-10 22:13:58 -07002888
Jens Axboe8a997342019-10-09 14:40:13 -06002889 for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
2890 if (ctx->sqo_wq[i])
2891 flush_workqueue(ctx->sqo_wq[i]);
2892
Jens Axboe6b063142019-01-10 22:13:58 -07002893 unix_destruct_scm(skb);
2894}
2895
2896/*
2897 * Ensure the UNIX gc is aware of our file set, so we are certain that
2898 * the io_uring can be safely unregistered on process exit, even if we have
2899 * loops in the file referencing.
2900 */
2901static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
2902{
2903 struct sock *sk = ctx->ring_sock->sk;
2904 struct scm_fp_list *fpl;
2905 struct sk_buff *skb;
2906 int i;
2907
2908 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
2909 unsigned long inflight = ctx->user->unix_inflight + nr;
2910
2911 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
2912 return -EMFILE;
2913 }
2914
2915 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
2916 if (!fpl)
2917 return -ENOMEM;
2918
2919 skb = alloc_skb(0, GFP_KERNEL);
2920 if (!skb) {
2921 kfree(fpl);
2922 return -ENOMEM;
2923 }
2924
2925 skb->sk = sk;
2926 skb->destructor = io_destruct_skb;
2927
2928 fpl->user = get_uid(ctx->user);
2929 for (i = 0; i < nr; i++) {
2930 fpl->fp[i] = get_file(ctx->user_files[i + offset]);
2931 unix_inflight(fpl->user, fpl->fp[i]);
2932 }
2933
2934 fpl->max = fpl->count = nr;
2935 UNIXCB(skb).fp = fpl;
2936 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2937 skb_queue_head(&sk->sk_receive_queue, skb);
2938
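	/*
	 * Drop the extra references taken by get_file() above; the files
	 * stay pinned through the queued skb until its destructor runs.
	 */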
2939 for (i = 0; i < nr; i++)
2940 fput(fpl->fp[i]);
2941
2942 return 0;
2943}
2944
2945/*
2946 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
2947 * causes regular reference counting to break down. We rely on the UNIX
2948 * garbage collection to take care of this problem for us.
2949 */
2950static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2951{
2952 unsigned left, total;
2953 int ret = 0;
2954
2955 total = 0;
2956 left = ctx->nr_user_files;
2957 while (left) {
2958 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07002959
2960 ret = __io_sqe_files_scm(ctx, this_files, total);
2961 if (ret)
2962 break;
2963 left -= this_files;
2964 total += this_files;
2965 }
2966
2967 if (!ret)
2968 return 0;
2969
2970 while (total < ctx->nr_user_files) {
2971 fput(ctx->user_files[total]);
2972 total++;
2973 }
2974
2975 return ret;
2976}
2977#else
2978static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2979{
2980 return 0;
2981}
2982#endif
2983
2984static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
2985 unsigned nr_args)
2986{
2987 __s32 __user *fds = (__s32 __user *) arg;
2988 int fd, ret = 0;
2989 unsigned i;
2990
2991 if (ctx->user_files)
2992 return -EBUSY;
2993 if (!nr_args)
2994 return -EINVAL;
2995 if (nr_args > IORING_MAX_FIXED_FILES)
2996 return -EMFILE;
2997
2998 ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
2999 if (!ctx->user_files)
3000 return -ENOMEM;
3001
3002 for (i = 0; i < nr_args; i++) {
3003 ret = -EFAULT;
3004 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3005 break;
3006
3007 ctx->user_files[i] = fget(fd);
3008
3009 ret = -EBADF;
3010 if (!ctx->user_files[i])
3011 break;
3012 /*
3013 * Don't allow io_uring instances to be registered. If UNIX
3014 * isn't enabled, then this causes a reference cycle and this
3015 * instance can never get freed. If UNIX is enabled we'll
3016 * handle it just fine, but there's still no point in allowing
3017 * a ring fd as it doesn't support regular read/write anyway.
3018 */
3019 if (ctx->user_files[i]->f_op == &io_uring_fops) {
3020 fput(ctx->user_files[i]);
3021 break;
3022 }
3023 ctx->nr_user_files++;
3024 ret = 0;
3025 }
3026
3027 if (ret) {
3028 for (i = 0; i < ctx->nr_user_files; i++)
3029 fput(ctx->user_files[i]);
3030
3031 kfree(ctx->user_files);
Jens Axboe25adf502019-04-03 09:52:40 -06003032 ctx->user_files = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07003033 ctx->nr_user_files = 0;
3034 return ret;
3035 }
3036
3037 ret = io_sqe_files_scm(ctx);
3038 if (ret)
3039 io_sqe_files_unregister(ctx);
3040
3041 return ret;
3042}
3043
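/*
 * Start the async offload side of the ring: the SQPOLL kthread when
 * IORING_SETUP_SQPOLL was requested, plus the workqueues used to punt
 * requests that would otherwise block.
 */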
Jens Axboe6c271ce2019-01-10 11:22:30 -07003044static int io_sq_offload_start(struct io_ring_ctx *ctx,
3045 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003046{
3047 int ret;
3048
Jens Axboe6c271ce2019-01-10 11:22:30 -07003049 init_waitqueue_head(&ctx->sqo_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003050 mmgrab(current->mm);
3051 ctx->sqo_mm = current->mm;
3052
Jens Axboe6c271ce2019-01-10 11:22:30 -07003053 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe3ec482d2019-04-08 10:51:01 -06003054 ret = -EPERM;
3055 if (!capable(CAP_SYS_ADMIN))
3056 goto err;
3057
Jens Axboe917257d2019-04-13 09:28:55 -06003058 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3059 if (!ctx->sq_thread_idle)
3060 ctx->sq_thread_idle = HZ;
3061
Jens Axboe6c271ce2019-01-10 11:22:30 -07003062 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06003063 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003064
Jens Axboe917257d2019-04-13 09:28:55 -06003065 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06003066 if (cpu >= nr_cpu_ids)
3067 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08003068 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06003069 goto err;
3070
Jens Axboe6c271ce2019-01-10 11:22:30 -07003071 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3072 ctx, cpu,
3073 "io_uring-sq");
3074 } else {
3075 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3076 "io_uring-sq");
3077 }
3078 if (IS_ERR(ctx->sqo_thread)) {
3079 ret = PTR_ERR(ctx->sqo_thread);
3080 ctx->sqo_thread = NULL;
3081 goto err;
3082 }
3083 wake_up_process(ctx->sqo_thread);
3084 } else if (p->flags & IORING_SETUP_SQ_AFF) {
3085 /* Can't have SQ_AFF without SQPOLL */
3086 ret = -EINVAL;
3087 goto err;
3088 }
3089
Jens Axboe2b188cc2019-01-07 10:46:33 -07003090	/* Do QD, or 2 * CPUS, whichever is smaller */
Jens Axboe54a91f32019-09-10 09:15:04 -06003091 ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
3092 WQ_UNBOUND | WQ_FREEZABLE,
Jens Axboe2b188cc2019-01-07 10:46:33 -07003093 min(ctx->sq_entries - 1, 2 * num_online_cpus()));
Jens Axboe54a91f32019-09-10 09:15:04 -06003094 if (!ctx->sqo_wq[0]) {
3095 ret = -ENOMEM;
3096 goto err;
3097 }
3098
3099 /*
3100 * This is for buffered writes, where we want to limit the parallelism
3101 * due to file locking in file systems. As "normal" buffered writes
3102	 * should parallelize on writeout quite nicely, limit us to having 2
3103 * pending. This avoids massive contention on the inode when doing
3104 * buffered async writes.
3105 */
3106 ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
3107 WQ_UNBOUND | WQ_FREEZABLE, 2);
3108 if (!ctx->sqo_wq[1]) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07003109 ret = -ENOMEM;
3110 goto err;
3111 }
3112
3113 return 0;
3114err:
Jens Axboe54a91f32019-09-10 09:15:04 -06003115 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003116 mmdrop(ctx->sqo_mm);
3117 ctx->sqo_mm = NULL;
3118 return ret;
3119}
3120
3121static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3122{
3123 atomic_long_sub(nr_pages, &user->locked_vm);
3124}
3125
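/*
 * Charge nr_pages against the user's RLIMIT_MEMLOCK budget, using a
 * cmpxchg loop so that concurrent callers cannot race past the limit.
 */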
3126static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3127{
3128 unsigned long page_limit, cur_pages, new_pages;
3129
3130 /* Don't allow more pages than we can safely lock */
3131 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3132
3133 do {
3134 cur_pages = atomic_long_read(&user->locked_vm);
3135 new_pages = cur_pages + nr_pages;
3136 if (new_pages > page_limit)
3137 return -ENOMEM;
3138 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3139 new_pages) != cur_pages);
3140
3141 return 0;
3142}
3143
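/*
 * Ring memory is allocated as compound pages (__GFP_COMP), so freeing it
 * just means dropping the reference on the head page.
 */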
3144static void io_mem_free(void *ptr)
3145{
Mark Rutland52e04ef2019-04-30 17:30:21 +01003146 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003147
Mark Rutland52e04ef2019-04-30 17:30:21 +01003148 if (!ptr)
3149 return;
3150
3151 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003152 if (put_page_testzero(page))
3153 free_compound_page(page);
3154}
3155
3156static void *io_mem_alloc(size_t size)
3157{
3158 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3159 __GFP_NORETRY;
3160
3161 return (void *) __get_free_pages(gfp_flags, get_order(size));
3162}
3163
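/*
 * Size of the shared ring area: the io_rings struct plus the CQE array,
 * followed by the (cache line aligned, on SMP) SQ index array. Returns
 * SIZE_MAX on overflow, and stores the SQ array offset in *sq_offset.
 */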
Hristo Venev75b28af2019-08-26 17:23:46 +00003164static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3165 size_t *sq_offset)
3166{
3167 struct io_rings *rings;
3168 size_t off, sq_array_size;
3169
3170 off = struct_size(rings, cqes, cq_entries);
3171 if (off == SIZE_MAX)
3172 return SIZE_MAX;
3173
3174#ifdef CONFIG_SMP
3175 off = ALIGN(off, SMP_CACHE_BYTES);
3176 if (off == 0)
3177 return SIZE_MAX;
3178#endif
3179
3180 sq_array_size = array_size(sizeof(u32), sq_entries);
3181 if (sq_array_size == SIZE_MAX)
3182 return SIZE_MAX;
3183
3184 if (check_add_overflow(off, sq_array_size, &off))
3185 return SIZE_MAX;
3186
3187 if (sq_offset)
3188 *sq_offset = off;
3189
3190 return off;
3191}
3192
Jens Axboe2b188cc2019-01-07 10:46:33 -07003193static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3194{
Hristo Venev75b28af2019-08-26 17:23:46 +00003195 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003196
Hristo Venev75b28af2019-08-26 17:23:46 +00003197 pages = (size_t)1 << get_order(
3198 rings_size(sq_entries, cq_entries, NULL));
3199 pages += (size_t)1 << get_order(
3200 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07003201
Hristo Venev75b28af2019-08-26 17:23:46 +00003202 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003203}
3204
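/*
 * Drop all registered buffers: unpin their pages, give back any accounted
 * memory and free the bio_vec tables.
 */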
Jens Axboeedafcce2019-01-09 09:16:05 -07003205static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3206{
3207 int i, j;
3208
3209 if (!ctx->user_bufs)
3210 return -ENXIO;
3211
3212 for (i = 0; i < ctx->nr_user_bufs; i++) {
3213 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3214
3215 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbard27c4d3a2019-08-04 19:32:06 -07003216 put_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07003217
3218 if (ctx->account_mem)
3219 io_unaccount_mem(ctx->user, imu->nr_bvecs);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003220 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003221 imu->nr_bvecs = 0;
3222 }
3223
3224 kfree(ctx->user_bufs);
3225 ctx->user_bufs = NULL;
3226 ctx->nr_user_bufs = 0;
3227 return 0;
3228}
3229
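/*
 * Copy the iovec at @index in from userspace, handling the compat iovec
 * layout if the ring was set up by a 32-bit task.
 */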
3230static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3231 void __user *arg, unsigned index)
3232{
3233 struct iovec __user *src;
3234
3235#ifdef CONFIG_COMPAT
3236 if (ctx->compat) {
3237 struct compat_iovec __user *ciovs;
3238 struct compat_iovec ciov;
3239
3240 ciovs = (struct compat_iovec __user *) arg;
3241 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3242 return -EFAULT;
3243
3244 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3245 dst->iov_len = ciov.iov_len;
3246 return 0;
3247 }
3248#endif
3249 src = (struct iovec __user *) arg;
3250 if (copy_from_user(dst, &src[index], sizeof(*dst)))
3251 return -EFAULT;
3252 return 0;
3253}
3254
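/*
 * Register fixed buffers: pin the pages backing each iovec with
 * FOLL_LONGTERM, account them against RLIMIT_MEMLOCK if needed, and build
 * a bio_vec table per buffer for later use at IO time.
 */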
3255static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3256 unsigned nr_args)
3257{
3258 struct vm_area_struct **vmas = NULL;
3259 struct page **pages = NULL;
3260 int i, j, got_pages = 0;
3261 int ret = -EINVAL;
3262
3263 if (ctx->user_bufs)
3264 return -EBUSY;
3265 if (!nr_args || nr_args > UIO_MAXIOV)
3266 return -EINVAL;
3267
3268 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3269 GFP_KERNEL);
3270 if (!ctx->user_bufs)
3271 return -ENOMEM;
3272
3273 for (i = 0; i < nr_args; i++) {
3274 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3275 unsigned long off, start, end, ubuf;
3276 int pret, nr_pages;
3277 struct iovec iov;
3278 size_t size;
3279
3280 ret = io_copy_iov(ctx, &iov, arg, i);
3281 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03003282 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07003283
3284 /*
3285 * Don't impose further limits on the size and buffer
3286	 * constraints here; we'll -EINVAL later when IO is
3287 * submitted if they are wrong.
3288 */
3289 ret = -EFAULT;
3290 if (!iov.iov_base || !iov.iov_len)
3291 goto err;
3292
3293 /* arbitrary limit, but we need something */
3294 if (iov.iov_len > SZ_1G)
3295 goto err;
3296
3297 ubuf = (unsigned long) iov.iov_base;
3298 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3299 start = ubuf >> PAGE_SHIFT;
3300 nr_pages = end - start;
3301
3302 if (ctx->account_mem) {
3303 ret = io_account_mem(ctx->user, nr_pages);
3304 if (ret)
3305 goto err;
3306 }
3307
3308 ret = 0;
3309 if (!pages || nr_pages > got_pages) {
3310 kfree(vmas);
3311 kfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003312 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07003313 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003314 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07003315 sizeof(struct vm_area_struct *),
3316 GFP_KERNEL);
3317 if (!pages || !vmas) {
3318 ret = -ENOMEM;
3319 if (ctx->account_mem)
3320 io_unaccount_mem(ctx->user, nr_pages);
3321 goto err;
3322 }
3323 got_pages = nr_pages;
3324 }
3325
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003326 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07003327 GFP_KERNEL);
3328 ret = -ENOMEM;
3329 if (!imu->bvec) {
3330 if (ctx->account_mem)
3331 io_unaccount_mem(ctx->user, nr_pages);
3332 goto err;
3333 }
3334
3335 ret = 0;
3336 down_read(&current->mm->mmap_sem);
Ira Weiny932f4a62019-05-13 17:17:03 -07003337 pret = get_user_pages(ubuf, nr_pages,
3338 FOLL_WRITE | FOLL_LONGTERM,
3339 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003340 if (pret == nr_pages) {
3341 /* don't support file backed memory */
3342 for (j = 0; j < nr_pages; j++) {
3343 struct vm_area_struct *vma = vmas[j];
3344
3345 if (vma->vm_file &&
3346 !is_file_hugepages(vma->vm_file)) {
3347 ret = -EOPNOTSUPP;
3348 break;
3349 }
3350 }
3351 } else {
3352 ret = pret < 0 ? pret : -EFAULT;
3353 }
3354 up_read(&current->mm->mmap_sem);
3355 if (ret) {
3356 /*
3357	 * If we did a partial map, or found file-backed vmas,
3358 * release any pages we did get
3359 */
John Hubbard27c4d3a2019-08-04 19:32:06 -07003360 if (pret > 0)
3361 put_user_pages(pages, pret);
Jens Axboeedafcce2019-01-09 09:16:05 -07003362 if (ctx->account_mem)
3363 io_unaccount_mem(ctx->user, nr_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003364 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003365 goto err;
3366 }
3367
3368 off = ubuf & ~PAGE_MASK;
3369 size = iov.iov_len;
3370 for (j = 0; j < nr_pages; j++) {
3371 size_t vec_len;
3372
3373 vec_len = min_t(size_t, size, PAGE_SIZE - off);
3374 imu->bvec[j].bv_page = pages[j];
3375 imu->bvec[j].bv_len = vec_len;
3376 imu->bvec[j].bv_offset = off;
3377 off = 0;
3378 size -= vec_len;
3379 }
3380 /* store original address for later verification */
3381 imu->ubuf = ubuf;
3382 imu->len = iov.iov_len;
3383 imu->nr_bvecs = nr_pages;
3384
3385 ctx->nr_user_bufs++;
3386 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003387 kvfree(pages);
3388 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003389 return 0;
3390err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003391 kvfree(pages);
3392 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003393 io_sqe_buffer_unregister(ctx);
3394 return ret;
3395}
3396
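/*
 * Register an eventfd that gets signalled whenever completions are posted
 * to the CQ ring.
 */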
Jens Axboe9b402842019-04-11 11:45:41 -06003397static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3398{
3399 __s32 __user *fds = arg;
3400 int fd;
3401
3402 if (ctx->cq_ev_fd)
3403 return -EBUSY;
3404
3405 if (copy_from_user(&fd, fds, sizeof(*fds)))
3406 return -EFAULT;
3407
3408 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3409 if (IS_ERR(ctx->cq_ev_fd)) {
3410 int ret = PTR_ERR(ctx->cq_ev_fd);
3411 ctx->cq_ev_fd = NULL;
3412 return ret;
3413 }
3414
3415 return 0;
3416}
3417
3418static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3419{
3420 if (ctx->cq_ev_fd) {
3421 eventfd_ctx_put(ctx->cq_ev_fd);
3422 ctx->cq_ev_fd = NULL;
3423 return 0;
3424 }
3425
3426 return -ENXIO;
3427}
3428
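/*
 * Final teardown of a ring context: stop async offload, drop every
 * registered resource, release the ring socket and free the ring memory.
 */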
Jens Axboe2b188cc2019-01-07 10:46:33 -07003429static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3430{
Jens Axboe6b063142019-01-10 22:13:58 -07003431 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003432 if (ctx->sqo_mm)
3433 mmdrop(ctx->sqo_mm);
Jens Axboedef596e2019-01-09 08:59:42 -07003434
3435 io_iopoll_reap_events(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07003436 io_sqe_buffer_unregister(ctx);
Jens Axboe6b063142019-01-10 22:13:58 -07003437 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06003438 io_eventfd_unregister(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07003439
Jens Axboe2b188cc2019-01-07 10:46:33 -07003440#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07003441 if (ctx->ring_sock) {
3442 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003443 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07003444 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003445#endif
3446
Hristo Venev75b28af2019-08-26 17:23:46 +00003447 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003448 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003449
3450 percpu_ref_exit(&ctx->refs);
3451 if (ctx->account_mem)
3452 io_unaccount_mem(ctx->user,
3453 ring_pages(ctx->sq_entries, ctx->cq_entries));
3454 free_uid(ctx->user);
3455 kfree(ctx);
3456}
3457
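/*
 * Poll support for the ring fd: report EPOLLOUT when there is room in the
 * SQ ring for new submissions, and EPOLLIN when completions are available
 * for the application to reap.
 */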
3458static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3459{
3460 struct io_ring_ctx *ctx = file->private_data;
3461 __poll_t mask = 0;
3462
3463 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02003464 /*
3465 * synchronizes with barrier from wq_has_sleeper call in
3466 * io_commit_cqring
3467 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003468 smp_rmb();
Hristo Venev75b28af2019-08-26 17:23:46 +00003469 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
3470 ctx->rings->sq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003471 mask |= EPOLLOUT | EPOLLWRNORM;
Hristo Venev75b28af2019-08-26 17:23:46 +00003472 if (READ_ONCE(ctx->rings->sq.head) != ctx->cached_cq_tail)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003473 mask |= EPOLLIN | EPOLLRDNORM;
3474
3475 return mask;
3476}
3477
3478static int io_uring_fasync(int fd, struct file *file, int on)
3479{
3480 struct io_ring_ctx *ctx = file->private_data;
3481
3482 return fasync_helper(fd, file, on, &ctx->cq_fasync);
3483}
3484
3485static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3486{
3487 mutex_lock(&ctx->uring_lock);
3488 percpu_ref_kill(&ctx->refs);
3489 mutex_unlock(&ctx->uring_lock);
3490
Jens Axboe5262f562019-09-17 12:26:57 -06003491 io_kill_timeouts(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003492 io_poll_remove_all(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07003493 io_iopoll_reap_events(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003494 wait_for_completion(&ctx->ctx_done);
3495 io_ring_ctx_free(ctx);
3496}
3497
3498static int io_uring_release(struct inode *inode, struct file *file)
3499{
3500 struct io_ring_ctx *ctx = file->private_data;
3501
3502 file->private_data = NULL;
3503 io_ring_ctx_wait_and_kill(ctx);
3504 return 0;
3505}
3506
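/*
 * mmap the rings or the SQE array into the application, selected by the
 * page offset the application passed in.
 */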
3507static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3508{
3509 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
3510 unsigned long sz = vma->vm_end - vma->vm_start;
3511 struct io_ring_ctx *ctx = file->private_data;
3512 unsigned long pfn;
3513 struct page *page;
3514 void *ptr;
3515
3516 switch (offset) {
3517 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00003518 case IORING_OFF_CQ_RING:
3519 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003520 break;
3521 case IORING_OFF_SQES:
3522 ptr = ctx->sq_sqes;
3523 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003524 default:
3525 return -EINVAL;
3526 }
3527
3528 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07003529 if (sz > page_size(page))
Jens Axboe2b188cc2019-01-07 10:46:33 -07003530 return -EINVAL;
3531
3532 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3533 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3534}
3535
3536SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3537 u32, min_complete, u32, flags, const sigset_t __user *, sig,
3538 size_t, sigsz)
3539{
3540 struct io_ring_ctx *ctx;
3541 long ret = -EBADF;
3542 int submitted = 0;
3543 struct fd f;
3544
Jens Axboe6c271ce2019-01-10 11:22:30 -07003545 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
Jens Axboe2b188cc2019-01-07 10:46:33 -07003546 return -EINVAL;
3547
3548 f = fdget(fd);
3549 if (!f.file)
3550 return -EBADF;
3551
3552 ret = -EOPNOTSUPP;
3553 if (f.file->f_op != &io_uring_fops)
3554 goto out_fput;
3555
3556 ret = -ENXIO;
3557 ctx = f.file->private_data;
3558 if (!percpu_ref_tryget(&ctx->refs))
3559 goto out_fput;
3560
Jens Axboe6c271ce2019-01-10 11:22:30 -07003561 /*
3562 * For SQ polling, the thread will do all submissions and completions.
3563 * Just return the requested submit count, and wake the thread if
3564 * we were asked to.
3565 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06003566 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003567 if (ctx->flags & IORING_SETUP_SQPOLL) {
3568 if (flags & IORING_ENTER_SQ_WAKEUP)
3569 wake_up(&ctx->sqo_wait);
3570 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06003571 } else if (to_submit) {
Jens Axboec57666682019-09-09 16:19:45 -06003572 bool block_for_last = false;
3573
Jens Axboe2b188cc2019-01-07 10:46:33 -07003574 to_submit = min(to_submit, ctx->sq_entries);
3575
Jens Axboec57666682019-09-09 16:19:45 -06003576 /*
3577 * Allow last submission to block in a series, IFF the caller
3578 * asked to wait for events and we don't currently have
3579 * enough. This potentially avoids an async punt.
3580 */
3581 if (to_submit == min_complete &&
3582 io_cqring_events(ctx->rings) < min_complete)
3583 block_for_last = true;
3584
Jens Axboe2b188cc2019-01-07 10:46:33 -07003585 mutex_lock(&ctx->uring_lock);
Jens Axboec57666682019-09-09 16:19:45 -06003586 submitted = io_ring_submit(ctx, to_submit, block_for_last);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003587 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003588 }
3589 if (flags & IORING_ENTER_GETEVENTS) {
Jens Axboedef596e2019-01-09 08:59:42 -07003590 unsigned nr_events = 0;
3591
Jens Axboe2b188cc2019-01-07 10:46:33 -07003592 min_complete = min(min_complete, ctx->cq_entries);
3593
Jens Axboedef596e2019-01-09 08:59:42 -07003594 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07003595 ret = io_iopoll_check(ctx, &nr_events, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07003596 } else {
3597 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3598 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003599 }
3600
Pavel Begunkov6805b322019-10-08 02:18:42 +03003601 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003602out_fput:
3603 fdput(f);
3604 return submitted ? submitted : ret;
3605}
3606
3607static const struct file_operations io_uring_fops = {
3608 .release = io_uring_release,
3609 .mmap = io_uring_mmap,
3610 .poll = io_uring_poll,
3611 .fasync = io_uring_fasync,
3612};
3613
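/*
 * Allocate the shared SQ/CQ ring area and the SQE array, and mirror the
 * ring geometry into the ctx for quick access.
 */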
3614static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3615 struct io_uring_params *p)
3616{
Hristo Venev75b28af2019-08-26 17:23:46 +00003617 struct io_rings *rings;
3618 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003619
Hristo Venev75b28af2019-08-26 17:23:46 +00003620 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
3621 if (size == SIZE_MAX)
3622 return -EOVERFLOW;
3623
3624 rings = io_mem_alloc(size);
3625 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003626 return -ENOMEM;
3627
Hristo Venev75b28af2019-08-26 17:23:46 +00003628 ctx->rings = rings;
3629 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3630 rings->sq_ring_mask = p->sq_entries - 1;
3631 rings->cq_ring_mask = p->cq_entries - 1;
3632 rings->sq_ring_entries = p->sq_entries;
3633 rings->cq_ring_entries = p->cq_entries;
3634 ctx->sq_mask = rings->sq_ring_mask;
3635 ctx->cq_mask = rings->cq_ring_mask;
3636 ctx->sq_entries = rings->sq_ring_entries;
3637 ctx->cq_entries = rings->cq_ring_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003638
3639 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3640 if (size == SIZE_MAX)
3641 return -EOVERFLOW;
3642
3643 ctx->sq_sqes = io_mem_alloc(size);
Mark Rutland52e04ef2019-04-30 17:30:21 +01003644 if (!ctx->sq_sqes)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003645 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003646
Jens Axboe2b188cc2019-01-07 10:46:33 -07003647 return 0;
3648}
3649
3650/*
3651 * Allocate an anonymous fd; this is what constitutes the application-visible
3652 * backing of an io_uring instance. The application mmaps this
3653 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3654 * we have to tie this fd to a socket for file garbage collection purposes.
3655 */
3656static int io_uring_get_fd(struct io_ring_ctx *ctx)
3657{
3658 struct file *file;
3659 int ret;
3660
3661#if defined(CONFIG_UNIX)
3662 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3663 &ctx->ring_sock);
3664 if (ret)
3665 return ret;
3666#endif
3667
3668 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3669 if (ret < 0)
3670 goto err;
3671
3672 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
3673 O_RDWR | O_CLOEXEC);
3674 if (IS_ERR(file)) {
3675 put_unused_fd(ret);
3676 ret = PTR_ERR(file);
3677 goto err;
3678 }
3679
3680#if defined(CONFIG_UNIX)
3681 ctx->ring_sock->file = file;
Jens Axboe6b063142019-01-10 22:13:58 -07003682 ctx->ring_sock->sk->sk_user_data = ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003683#endif
3684 fd_install(ret, file);
3685 return ret;
3686err:
3687#if defined(CONFIG_UNIX)
3688 sock_release(ctx->ring_sock);
3689 ctx->ring_sock = NULL;
3690#endif
3691 return ret;
3692}
3693
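/*
 * Core of io_uring_setup(): account and allocate the rings, start async
 * offload and install the ring fd, filling in the offsets the application
 * needs in order to mmap the rings.
 */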
3694static int io_uring_create(unsigned entries, struct io_uring_params *p)
3695{
3696 struct user_struct *user = NULL;
3697 struct io_ring_ctx *ctx;
3698 bool account_mem;
3699 int ret;
3700
3701 if (!entries || entries > IORING_MAX_ENTRIES)
3702 return -EINVAL;
3703
3704 /*
3705 * Use twice as many entries for the CQ ring. It's possible for the
3706 * application to drive a higher depth than the size of the SQ ring,
3707 * since the sqes are only used at submission time. This allows for
3708 * some flexibility in overcommitting a bit.
3709 */
3710 p->sq_entries = roundup_pow_of_two(entries);
3711 p->cq_entries = 2 * p->sq_entries;
3712
3713 user = get_uid(current_user());
3714 account_mem = !capable(CAP_IPC_LOCK);
3715
3716 if (account_mem) {
3717 ret = io_account_mem(user,
3718 ring_pages(p->sq_entries, p->cq_entries));
3719 if (ret) {
3720 free_uid(user);
3721 return ret;
3722 }
3723 }
3724
3725 ctx = io_ring_ctx_alloc(p);
3726 if (!ctx) {
3727 if (account_mem)
3728 io_unaccount_mem(user, ring_pages(p->sq_entries,
3729 p->cq_entries));
3730 free_uid(user);
3731 return -ENOMEM;
3732 }
3733 ctx->compat = in_compat_syscall();
3734 ctx->account_mem = account_mem;
3735 ctx->user = user;
3736
3737 ret = io_allocate_scq_urings(ctx, p);
3738 if (ret)
3739 goto err;
3740
Jens Axboe6c271ce2019-01-10 11:22:30 -07003741 ret = io_sq_offload_start(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003742 if (ret)
3743 goto err;
3744
3745 ret = io_uring_get_fd(ctx);
3746 if (ret < 0)
3747 goto err;
3748
3749 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00003750 p->sq_off.head = offsetof(struct io_rings, sq.head);
3751 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3752 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3753 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3754 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3755 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3756 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003757
3758 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00003759 p->cq_off.head = offsetof(struct io_rings, cq.head);
3760 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3761 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3762 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3763 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3764 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Jens Axboeac90f242019-09-06 10:26:21 -06003765
3766 p->features = IORING_FEAT_SINGLE_MMAP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003767 return ret;
3768err:
3769 io_ring_ctx_wait_and_kill(ctx);
3770 return ret;
3771}
3772
3773/*
3774 * Sets up an io_uring context and returns the fd. The application asks for a
3775 * ring size; we return the actual sq/cq ring sizes (among other things) in the
3776 * params structure passed in.
3777 */
3778static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3779{
3780 struct io_uring_params p;
3781 long ret;
3782 int i;
3783
3784 if (copy_from_user(&p, params, sizeof(p)))
3785 return -EFAULT;
3786 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3787 if (p.resv[i])
3788 return -EINVAL;
3789 }
3790
Jens Axboe6c271ce2019-01-10 11:22:30 -07003791 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3792 IORING_SETUP_SQ_AFF))
Jens Axboe2b188cc2019-01-07 10:46:33 -07003793 return -EINVAL;
3794
3795 ret = io_uring_create(entries, &p);
3796 if (ret < 0)
3797 return ret;
3798
3799 if (copy_to_user(params, &p, sizeof(p)))
3800 return -EFAULT;
3801
3802 return ret;
3803}
3804
3805SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3806 struct io_uring_params __user *, params)
3807{
3808 return io_uring_setup(entries, params);
3809}
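/*
 * Userspace sketch (illustrative only; see the liburing examples for the
 * real thing). After io_uring_setup() returns a ring fd, the application
 * maps the SQ ring using the offsets returned in the params structure:
 *
 *	struct io_uring_params p = { };
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *	void *sq_ptr = mmap(NULL,
 *			    p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 */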
3810
Jens Axboeedafcce2019-01-09 09:16:05 -07003811static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
3812 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06003813 __releases(ctx->uring_lock)
3814 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07003815{
3816 int ret;
3817
Jens Axboe35fa71a2019-04-22 10:23:23 -06003818 /*
3819 * We're inside the ring mutex, if the ref is already dying, then
3820 * someone else killed the ctx or is already going through
3821 * io_uring_register().
3822 */
3823 if (percpu_ref_is_dying(&ctx->refs))
3824 return -ENXIO;
3825
Jens Axboeedafcce2019-01-09 09:16:05 -07003826 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06003827
3828 /*
3829 * Drop uring mutex before waiting for references to exit. If another
3830 * thread is currently inside io_uring_enter() it might need to grab
3831 * the uring_lock to make progress. If we hold it here across the drain
3832 * wait, then we can deadlock. It's safe to drop the mutex here, since
3833 * no new references will come in after we've killed the percpu ref.
3834 */
3835 mutex_unlock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07003836 wait_for_completion(&ctx->ctx_done);
Jens Axboeb19062a2019-04-15 10:49:38 -06003837 mutex_lock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07003838
3839 switch (opcode) {
3840 case IORING_REGISTER_BUFFERS:
3841 ret = io_sqe_buffer_register(ctx, arg, nr_args);
3842 break;
3843 case IORING_UNREGISTER_BUFFERS:
3844 ret = -EINVAL;
3845 if (arg || nr_args)
3846 break;
3847 ret = io_sqe_buffer_unregister(ctx);
3848 break;
Jens Axboe6b063142019-01-10 22:13:58 -07003849 case IORING_REGISTER_FILES:
3850 ret = io_sqe_files_register(ctx, arg, nr_args);
3851 break;
3852 case IORING_UNREGISTER_FILES:
3853 ret = -EINVAL;
3854 if (arg || nr_args)
3855 break;
3856 ret = io_sqe_files_unregister(ctx);
3857 break;
Jens Axboe9b402842019-04-11 11:45:41 -06003858 case IORING_REGISTER_EVENTFD:
3859 ret = -EINVAL;
3860 if (nr_args != 1)
3861 break;
3862 ret = io_eventfd_register(ctx, arg);
3863 break;
3864 case IORING_UNREGISTER_EVENTFD:
3865 ret = -EINVAL;
3866 if (arg || nr_args)
3867 break;
3868 ret = io_eventfd_unregister(ctx);
3869 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07003870 default:
3871 ret = -EINVAL;
3872 break;
3873 }
3874
3875 /* bring the ctx back to life */
3876 reinit_completion(&ctx->ctx_done);
3877 percpu_ref_reinit(&ctx->refs);
3878 return ret;
3879}
3880
3881SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
3882 void __user *, arg, unsigned int, nr_args)
3883{
3884 struct io_ring_ctx *ctx;
3885 long ret = -EBADF;
3886 struct fd f;
3887
3888 f = fdget(fd);
3889 if (!f.file)
3890 return -EBADF;
3891
3892 ret = -EOPNOTSUPP;
3893 if (f.file->f_op != &io_uring_fops)
3894 goto out_fput;
3895
3896 ctx = f.file->private_data;
3897
3898 mutex_lock(&ctx->uring_lock);
3899 ret = __io_uring_register(ctx, opcode, arg, nr_args);
3900 mutex_unlock(&ctx->uring_lock);
3901out_fput:
3902 fdput(f);
3903 return ret;
3904}
3905
Jens Axboe2b188cc2019-01-07 10:46:33 -07003906static int __init io_uring_init(void)
3907{
3908 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3909 return 0;
3910};
3911__initcall(io_uring_init);