// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
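
/*
 * Illustrative sketch of the application-side ordering described above; this
 * is not kernel code. Names such as sq_tail, sq_mask, sq_array, sqes, cq_head,
 * cq_tail, cq_mask, fill_sqe() and handle_cqe() are placeholders for pointers
 * and helpers an application derives from the mmap'ed rings, and the barrier
 * names are the kernel-style ones used in this comment; liburing wraps the
 * same idea in io_uring_smp_store_release()/io_uring_smp_load_acquire().
 *
 *	// publish one SQE: fill the entry, publish its index, then
 *	// release-store the new tail so the kernel's acquire-load of
 *	// sq.tail also sees the SQE contents.
 *	unsigned tail = *sq_tail;
 *	unsigned index = tail & *sq_mask;
 *	fill_sqe(&sqes[index]);
 *	sq_array[index] = index;
 *	smp_store_release(sq_tail, tail + 1);
 *
 *	// with IORING_SETUP_SQPOLL: full barrier between the tail store
 *	// and the IORING_SQ_NEED_WAKEUP check.
 *	smp_mb();
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 *
 *	// consume CQEs: acquire-load the tail, then release-store the new
 *	// head so the kernel may reuse the consumed slots.
 *	unsigned seen = smp_load_acquire(cq_tail);
 *	while (*cq_head != seen) {
 *		handle_cqe(&cqes[*cq_head & *cq_mask]);
 *		smp_store_release(cq_head, *cq_head + 1);
 *	}
 */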
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
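
/*
 * Illustrative note (not code used verbatim elsewhere in this file): a
 * fixed-file index is resolved with the shift/mask above as a two-level
 * lookup, roughly
 *
 *	struct fixed_file_table *table;
 *
 *	table = &file_data->table[index >> IORING_FILE_TABLE_SHIFT];
 *	file  = table->files[index & IORING_FILE_TABLE_MASK];
 *
 * which is what the registered-file helpers later in this file do; the
 * names here (file_data, index, file) are placeholders.
 */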
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32 sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
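
/*
 * Illustrative sketch of the application side (not kernel code, error
 * handling omitted): the fields above are reached by mmap'ing the ring
 * regions and adding the offsets published in io_uring_params, e.g.
 *
 *	struct io_uring_params p = {};
 *	int fd = io_uring_setup(entries, &p);
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = sq + p.sq_off.tail;
 *	unsigned *sq_mask = sq + p.sq_off.ring_mask;
 *
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	void *cq = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
 *	struct io_uring_cqe *cqes = cq + p.cq_off.cqes;
 */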

struct io_mapped_ubuf {
	u64 ubuf;
	size_t len;
	struct bio_vec *bvec;
	unsigned int nr_bvecs;
	unsigned long acct_pages;
};

struct fixed_file_table {
	struct file **files;
};

struct fixed_file_ref_node {
	struct percpu_ref refs;
	struct list_head node;
	struct list_head file_list;
	struct fixed_file_data *file_data;
	struct llist_node llist;
};

struct fixed_file_data {
	struct fixed_file_table *table;
	struct io_ring_ctx *ctx;

	struct fixed_file_ref_node *node;
	struct percpu_ref refs;
	struct completion done;
	struct list_head ref_list;
	spinlock_t lock;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_sq_data {
	refcount_t refs;
	struct mutex lock;

	/* ctx's that are using this sqd */
	struct list_head ctx_list;
	struct list_head ctx_new_list;
	struct mutex ctx_lock;

	struct task_struct *thread;
	struct wait_queue_head wait;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int flags;
		unsigned int compat: 1;
		unsigned int limit_mem: 1;
		unsigned int cq_overflow_flushed: 1;
		unsigned int drain_next: 1;
		unsigned int eventfd_async: 1;
		unsigned int restricted: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		unsigned cached_sq_head;
		unsigned sq_entries;
		unsigned sq_mask;
		unsigned sq_thread_idle;
		unsigned cached_sq_dropped;
		atomic_t cached_cq_overflow;
		unsigned long sq_check_overflow;

		struct list_head defer_list;
		struct list_head timeout_list;
		struct list_head cq_overflow_list;

		wait_queue_head_t inflight_wait;
		struct io_uring_sqe *sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings *rings;

	/* IO offload */
	struct io_wq *io_wq;

	/*
	 * For SQPOLL usage - we hold a reference to the parent task, so we
	 * have access to the ->files
	 */
	struct task_struct *sqo_task;

	/* Only used for accounting purposes */
	struct mm_struct *mm_account;

#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *sqo_blkcg_css;
#endif

	struct io_sq_data *sq_data;	/* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct wait_queue_entry sqo_wait_entry;
	struct list_head sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data *file_data;
	unsigned nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned nr_user_bufs;
	struct io_mapped_ubuf *user_bufs;

	struct user_struct *user;

	const struct cred *creds;

#ifdef CONFIG_AUDIT
	kuid_t loginuid;
	unsigned int sessionid;
#endif

	struct completion ref_comp;
	struct completion sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb *fallback_req;

#if defined(CONFIG_UNIX)
	struct socket *ring_sock;
#endif

	struct idr io_buffer_idr;

	struct idr personality_idr;

	struct {
		unsigned cached_cq_tail;
		unsigned cq_entries;
		unsigned cq_mask;
		atomic_t cq_timeouts;
		unsigned long cq_check_overflow;
		struct wait_queue_head cq_wait;
		struct fasync_struct *cq_fasync;
		struct eventfd_ctx *cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex uring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head iopoll_list;
		struct hlist_head *cancel_hash;
		unsigned cancel_hash_bits;
		bool poll_multi_file;

		spinlock_t inflight_lock;
		struct list_head inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work file_put_work;
	struct llist_head file_put_llist;

	struct work_struct exit_work;
	struct io_restriction restrictions;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file *file;
	union {
		struct wait_queue_head *head;
		u64 addr;
	};
	__poll_t events;
	bool done;
	bool canceled;
	struct wait_queue_entry wait;
};

struct io_close {
	struct file *file;
	struct file *put_file;
	int fd;
};

struct io_timeout_data {
	struct io_kiocb *req;
	struct hrtimer timer;
	struct timespec64 ts;
	enum hrtimer_mode mode;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	unsigned long nofile;
};

struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
	struct file *file;
	u64 addr;
};

struct io_timeout {
	struct file *file;
	u32 off;
	u32 target_seq;
	struct list_head list;
};

struct io_timeout_rem {
	struct file *file;
	u64 addr;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb kiocb;
	u64 addr;
	u64 len;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int msg_flags;
	int bgid;
	size_t len;
	struct io_buffer *kbuf;
};

struct io_open {
	struct file *file;
	int dfd;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_files_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

struct io_fadvise {
	struct file *file;
	u64 offset;
	u32 len;
	u32 advice;
};

struct io_madvise {
	struct file *file;
	u64 addr;
	u32 len;
	u32 advice;
};

struct io_epoll {
	struct file *file;
	int epfd;
	int op;
	int fd;
	struct epoll_event event;
};

struct io_splice {
	struct file *file_out;
	struct file *file_in;
	loff_t off_out;
	loff_t off_in;
	u64 len;
	unsigned int flags;
};

struct io_provide_buf {
	struct file *file;
	__u64 addr;
	__s32 len;
	__u32 bgid;
	__u16 nbufs;
	__u16 bid;
};

struct io_statx {
	struct file *file;
	int dfd;
	unsigned int mask;
	unsigned int flags;
	const char __user *filename;
	struct statx __user *buffer;
};

struct io_completion {
	struct file *file;
	struct list_head list;
	int cflags;
};

struct io_async_connect {
	struct sockaddr_storage address;
};

struct io_async_msghdr {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	struct sockaddr __user *uaddr;
	struct msghdr msg;
	struct sockaddr_storage addr;
};

struct io_async_rw {
	struct iovec fast_iov[UIO_FASTIOV];
	const struct iovec *free_iovec;
	struct iov_iter iter;
	size_t bytes_done;
	struct wait_page_queue wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_WORK_INITIALIZED_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
};

struct async_poll {
	struct io_poll_iocb poll;
	struct io_poll_iocb *double_poll;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file *file;
		struct io_rw rw;
		struct io_poll_iocb poll;
		struct io_accept accept;
		struct io_sync sync;
		struct io_cancel cancel;
		struct io_timeout timeout;
		struct io_timeout_rem timeout_rem;
		struct io_connect connect;
		struct io_sr_msg sr_msg;
		struct io_open open;
		struct io_close close;
		struct io_files_update files_update;
		struct io_fadvise fadvise;
		struct io_madvise madvise;
		struct io_epoll epoll;
		struct io_splice splice;
		struct io_provide_buf pbuf;
		struct io_statx statx;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;

	u16 buf_index;
	u32 result;

	struct io_ring_ctx *ctx;
	unsigned int flags;
	refcount_t refs;
	struct task_struct *task;
	u64 user_data;

	struct list_head link_list;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head inflight_entry;

	struct percpu_ref *fixed_file_refs;
	struct callback_head task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node hash_node;
	struct async_poll *apoll;
	struct io_wq_work work;
};

struct io_defer_entry {
	struct list_head list;
	struct io_kiocb *req;
	u32 seq;
};

#define IO_IOPOLL_BATCH	8

struct io_comp_state {
	unsigned int nr;
	struct list_head list;
	struct io_ring_ctx *ctx;
};

struct io_submit_state {
	struct blk_plug plug;

	/*
	 * io_kiocb alloc cache
	 */
	void *reqs[IO_IOPOLL_BATCH];
	unsigned int free_reqs;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state comp;

	/*
	 * File reference cache
	 */
	struct file *file;
	unsigned int fd;
	unsigned int has_refs;
	unsigned int ios_left;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned needs_file : 1;
	/* don't fail if file grab fails */
	unsigned needs_file_no_error : 1;
	/* hash wq insertion if file is a regular file */
	unsigned hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned pollin : 1;
	unsigned pollout : 1;
	/* op supports buffer selection */
	unsigned buffer_select : 1;
	/* needs rlimit(RLIMIT_FSIZE) assigned */
	unsigned needs_fsize : 1;
	/* must always have async data allocated */
	unsigned needs_async_data : 1;
	/* size of async data needed, if any */
	unsigned short async_size;
	unsigned work_flags;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.needs_async_data = 1,
		.async_size = sizeof(struct io_async_rw),
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITEV] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.needs_fsize = 1,
		.needs_async_data = 1,
		.async_size = sizeof(struct io_async_rw),
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_FSYNC] = {
		.needs_file = 1,
		.work_flags = IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.async_size = sizeof(struct io_async_rw),
		.work_flags = IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.needs_fsize = 1,
		.async_size = sizeof(struct io_async_rw),
		.work_flags = IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file = 1,
		.work_flags = IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.needs_async_data = 1,
		.async_size = sizeof(struct io_async_msghdr),
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
				IO_WQ_WORK_FS,
	},
	[IORING_OP_RECVMSG] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.needs_async_data = 1,
		.async_size = sizeof(struct io_async_msghdr),
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
				IO_WQ_WORK_FS,
	},
	[IORING_OP_TIMEOUT] = {
		.needs_async_data = 1,
		.async_size = sizeof(struct io_timeout_data),
		.work_flags = IO_WQ_WORK_MM,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.needs_async_data = 1,
		.async_size = sizeof(struct io_timeout_data),
		.work_flags = IO_WQ_WORK_MM,
	},
	[IORING_OP_CONNECT] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.needs_async_data = 1,
		.async_size = sizeof(struct io_async_connect),
		.work_flags = IO_WQ_WORK_MM,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file = 1,
		.needs_fsize = 1,
		.work_flags = IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_OPENAT] = {
		.work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
				IO_WQ_WORK_FS,
	},
	[IORING_OP_CLOSE] = {
		.needs_file = 1,
		.needs_file_no_error = 1,
		.work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_FILES_UPDATE] = {
		.work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
	},
	[IORING_OP_STATX] = {
		.work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
				IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.async_size = sizeof(struct io_async_rw),
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITE] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.needs_fsize = 1,
		.async_size = sizeof(struct io_async_rw),
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_FADVISE] = {
		.needs_file = 1,
		.work_flags = IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_MADVISE] = {
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SEND] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_RECV] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_OPENAT2] = {
		.work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
				IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file = 1,
		.work_flags = IO_WQ_WORK_FILES,
	},
	[IORING_OP_SPLICE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.work_flags = IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
	},
};

enum io_mem_account {
	ACCT_LOCKED,
	ACCT_PINNED,
};

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     struct io_comp_state *cs);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static void __io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
static void io_file_put_work(struct work_struct *work);

static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
			  REQ_F_INFLIGHT))
		__io_clean_op(req);
}

static void io_sq_thread_drop_mm(void)
{
	struct mm_struct *mm = current->mm;

	if (mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}

static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
	if (!current->mm) {
		if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
			     !ctx->sqo_task->mm ||
			     !mmget_not_zero(ctx->sqo_task->mm)))
			return -EFAULT;
		kthread_use_mm(ctx->sqo_task->mm);
	}

	return 0;
}

static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM))
		return 0;
	return __io_sq_thread_acquire_mm(ctx);
}

static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
					 struct cgroup_subsys_state **cur_css)

{
#ifdef CONFIG_BLK_CGROUP
	/* puts the old one when swapping */
	if (*cur_css != ctx->sqo_blkcg_css) {
		kthread_associate_blkcg(ctx->sqo_blkcg_css);
		*cur_css = ctx->sqo_blkcg_css;
	}
#endif
}

static void io_sq_thread_unassociate_blkcg(void)
{
#ifdef CONFIG_BLK_CGROUP
	kthread_associate_blkcg(NULL);
#endif
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}

/*
 * None of these are dereferenced, they are simply used to check if any of
 * them have changed. If we're under current and check they are still the
 * same, we're fine to grab references to them for actual out-of-line use.
 */
static void io_init_identity(struct io_identity *id)
{
	id->files = current->files;
	id->mm = current->mm;
#ifdef CONFIG_BLK_CGROUP
	rcu_read_lock();
	id->blkcg_css = blkcg_css();
	rcu_read_unlock();
#endif
	id->creds = current_cred();
	id->nsproxy = current->nsproxy;
	id->fs = current->fs;
	id->fsize = rlimit(RLIMIT_FSIZE);
#ifdef CONFIG_AUDIT
	id->loginuid = current->loginuid;
	id->sessionid = current->sessionid;
#endif
	refcount_set(&id->count, 1);
}

static inline void __io_req_init_async(struct io_kiocb *req)
{
	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}

/*
 * Note: must call io_req_init_async() for the first time you
 * touch any members of io_wq_work.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
	struct io_uring_task *tctx = current->io_uring;

	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

	__io_req_init_async(req);

	/* Grab a ref if this isn't our static identity */
	req->work.identity = tctx->identity;
	if (tctx->identity != &tctx->__identity)
		refcount_inc(&req->work.identity->count);
}

static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_SQPOLL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
				   GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	init_completion(&ctx->sq_thread_comp);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
	init_llist_head(&ctx->file_put_llist);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq != ctx->cached_cq_tail
				+ atomic_read(&ctx->cached_cq_overflow);
	}

	return false;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
{
	if (req->work.identity == &tctx->__identity)
		return;
	if (refcount_dec_and_test(&req->work.identity->count))
		kfree(req->work.identity);
}

static void io_req_clean_work(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_WORK_INITIALIZED))
		return;

	req->flags &= ~REQ_F_WORK_INITIALIZED;

	if (req->work.flags & IO_WQ_WORK_MM) {
		mmdrop(req->work.identity->mm);
		req->work.flags &= ~IO_WQ_WORK_MM;
	}
#ifdef CONFIG_BLK_CGROUP
	if (req->work.flags & IO_WQ_WORK_BLKCG) {
		css_put(req->work.identity->blkcg_css);
		req->work.flags &= ~IO_WQ_WORK_BLKCG;
	}
#endif
	if (req->work.flags & IO_WQ_WORK_CREDS) {
		put_cred(req->work.identity->creds);
		req->work.flags &= ~IO_WQ_WORK_CREDS;
	}
	if (req->work.flags & IO_WQ_WORK_FS) {
		struct fs_struct *fs = req->work.identity->fs;

		spin_lock(&req->work.identity->fs->lock);
		if (--fs->users)
			fs = NULL;
		spin_unlock(&req->work.identity->fs->lock);
		if (fs)
			free_fs_struct(fs);
		req->work.flags &= ~IO_WQ_WORK_FS;
	}

	io_put_identity(req->task->io_uring, req);
}

/*
 * Create a private copy of io_identity, since some fields don't match
 * the current context.
 */
static bool io_identity_cow(struct io_kiocb *req)
{
	struct io_uring_task *tctx = current->io_uring;
	const struct cred *creds = NULL;
	struct io_identity *id;

	if (req->work.flags & IO_WQ_WORK_CREDS)
		creds = req->work.identity->creds;

	id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
	if (unlikely(!id)) {
		req->work.flags |= IO_WQ_WORK_CANCEL;
		return false;
	}

1264 /*
1265 * We can safely just re-init the creds we copied. Either the field
1266 * matches the current one, or we haven't grabbed it yet. The only
1267 * exception is ->creds, through registered personalities, so handle
1268 * that one separately.
1269 */
1270 io_init_identity(id);
1271 if (creds)
1272 req->work.identity->creds = creds;
1273
1274 /* add one for this request */
1275 refcount_inc(&id->count);
1276
1277 /* drop old identity, assign new one. one ref for req, one for tctx */
Jens Axboe500a3732020-10-15 17:38:03 -06001278 if (req->work.identity != tctx->identity &&
Jens Axboe1e6fa522020-10-15 08:46:24 -06001279 refcount_sub_and_test(2, &req->work.identity->count))
1280 kfree(req->work.identity);
1281
1282 req->work.identity = id;
Jens Axboe500a3732020-10-15 17:38:03 -06001283 tctx->identity = id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001284 return true;
1285}
1286
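/*
 * Check that the identity still matches the current task and take references
 * on the pieces this opcode needs for async execution (files, blkcg css,
 * creds, fs). Returns false on any mismatch, in which case the caller COWs
 * the identity and retries.
 */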
1287static bool io_grab_identity(struct io_kiocb *req)
1288{
1289 const struct io_op_def *def = &io_op_defs[req->opcode];
Jens Axboe5c3462c2020-10-15 09:02:33 -06001290 struct io_identity *id = req->work.identity;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001291 struct io_ring_ctx *ctx = req->ctx;
1292
1293 if (def->needs_fsize && id->fsize != rlimit(RLIMIT_FSIZE))
1294 return false;
1295
1296 if (!(req->work.flags & IO_WQ_WORK_FILES) &&
1297 (def->work_flags & IO_WQ_WORK_FILES) &&
1298 !(req->flags & REQ_F_NO_FILE_TABLE)) {
1299 if (id->files != current->files ||
1300 id->nsproxy != current->nsproxy)
1301 return false;
1302 atomic_inc(&id->files->count);
1303 get_nsproxy(id->nsproxy);
1304 req->flags |= REQ_F_INFLIGHT;
1305
1306 spin_lock_irq(&ctx->inflight_lock);
1307 list_add(&req->inflight_entry, &ctx->inflight_list);
1308 spin_unlock_irq(&ctx->inflight_lock);
1309 req->work.flags |= IO_WQ_WORK_FILES;
1310 }
1311#ifdef CONFIG_BLK_CGROUP
1312 if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
1313 (def->work_flags & IO_WQ_WORK_BLKCG)) {
1314 rcu_read_lock();
1315 if (id->blkcg_css != blkcg_css()) {
1316 rcu_read_unlock();
1317 return false;
1318 }
1319 /*
1320 * This should be rare, either the cgroup is dying or the task
1321 * is moving cgroups. Just punt to root for the handful of ios.
1322 */
1323 if (css_tryget_online(id->blkcg_css))
1324 req->work.flags |= IO_WQ_WORK_BLKCG;
1325 rcu_read_unlock();
1326 }
1327#endif
1328 if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
1329 if (id->creds != current_cred())
1330 return false;
1331 get_cred(id->creds);
1332 req->work.flags |= IO_WQ_WORK_CREDS;
1333 }
Jens Axboe4ea33a92020-10-15 13:46:44 -06001334#ifdef CONFIG_AUDIT
1335 if (!uid_eq(current->loginuid, id->loginuid) ||
1336 current->sessionid != id->sessionid)
1337 return false;
1338#endif
Jens Axboe1e6fa522020-10-15 08:46:24 -06001339 if (!(req->work.flags & IO_WQ_WORK_FS) &&
1340 (def->work_flags & IO_WQ_WORK_FS)) {
1341 if (current->fs != id->fs)
1342 return false;
1343 spin_lock(&id->fs->lock);
1344 if (!id->fs->in_exec) {
1345 id->fs->users++;
1346 req->work.flags |= IO_WQ_WORK_FS;
1347 } else {
1348 req->work.flags |= IO_WQ_WORK_CANCEL;
1349 }
1350 spin_unlock(&id->fs->lock);
1351 }
1352
1353 return true;
Jens Axboe561fb042019-10-24 07:25:42 -06001354}
1355
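/*
 * Set up ->work for punting this request to io-wq: hash work on the file's
 * inode for regular files that need serialized access (or under IOPOLL),
 * mark non-regular files unbound, grab the mm if the opcode needs one, and
 * pin the submitter's identity, COWing it first if it no longer matches.
 */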
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001356static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001357{
Jens Axboed3656342019-12-18 09:50:26 -07001358 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001359 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5c3462c2020-10-15 09:02:33 -06001360 struct io_identity *id;
Jens Axboe54a91f32019-09-10 09:15:04 -06001361
Pavel Begunkov16d59802020-07-12 16:16:47 +03001362 io_req_init_async(req);
Jens Axboe5c3462c2020-10-15 09:02:33 -06001363 id = req->work.identity;
Pavel Begunkov16d59802020-07-12 16:16:47 +03001364
Jens Axboed3656342019-12-18 09:50:26 -07001365 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001366 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001367 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboed3656342019-12-18 09:50:26 -07001368 } else {
1369 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001370 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001371 }
Pavel Begunkov23329512020-10-10 18:34:06 +01001372
Jens Axboe1e6fa522020-10-15 08:46:24 -06001373 /* ->mm can never change on us */
Jens Axboedfead8a2020-10-14 10:12:37 -06001374 if (!(req->work.flags & IO_WQ_WORK_MM) &&
1375 (def->work_flags & IO_WQ_WORK_MM)) {
Jens Axboe1e6fa522020-10-15 08:46:24 -06001376 mmgrab(id->mm);
Jens Axboedfead8a2020-10-14 10:12:37 -06001377 req->work.flags |= IO_WQ_WORK_MM;
Pavel Begunkovdca9cf82020-07-15 12:46:49 +03001378 }
Jens Axboe1e6fa522020-10-15 08:46:24 -06001379
1380 /* if we fail grabbing identity, we must COW, regrab, and retry */
1381 if (io_grab_identity(req))
1382 return;
1383
1384 if (!io_identity_cow(req))
1385 return;
1386
1387 /* can't fail at this point */
1388 if (!io_grab_identity(req))
1389 WARN_ON(1);
Jens Axboe561fb042019-10-24 07:25:42 -06001390}
1391
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001392static void io_prep_async_link(struct io_kiocb *req)
1393{
1394 struct io_kiocb *cur;
1395
1396 io_prep_async_work(req);
1397 if (req->flags & REQ_F_LINK_HEAD)
1398 list_for_each_entry(cur, &req->link_list, link_list)
1399 io_prep_async_work(cur);
1400}
1401
Jens Axboe7271ef32020-08-10 09:55:22 -06001402static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001403{
Jackie Liua197f662019-11-08 08:09:12 -07001404 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001405 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe561fb042019-10-24 07:25:42 -06001406
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001407 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1408 &req->work, req->flags);
1409 io_wq_enqueue(ctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001410 return link;
Jens Axboe18d9be12019-09-10 09:13:05 -06001411}
1412
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001413static void io_queue_async_work(struct io_kiocb *req)
1414{
Jens Axboe7271ef32020-08-10 09:55:22 -06001415 struct io_kiocb *link;
1416
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001417 /* init ->work of the whole link before punting */
1418 io_prep_async_link(req);
Jens Axboe7271ef32020-08-10 09:55:22 -06001419 link = __io_queue_async_work(req);
1420
1421 if (link)
1422 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001423}
1424
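/*
 * Cancel a pending timeout: if the hrtimer wasn't already running its
 * callback, account it in cq_timeouts, post a res=0 CQE and drop the
 * request's reference.
 */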
Jens Axboe5262f562019-09-17 12:26:57 -06001425static void io_kill_timeout(struct io_kiocb *req)
1426{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001427 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001428 int ret;
1429
Jens Axboee8c2bc12020-08-15 18:44:09 -07001430 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe5262f562019-09-17 12:26:57 -06001431 if (ret != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001432 atomic_set(&req->ctx->cq_timeouts,
1433 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001434 list_del_init(&req->timeout.list);
Jens Axboe78e19bb2019-11-06 15:21:34 -07001435 io_cqring_fill_event(req, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001436 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001437 }
1438}
1439
Jens Axboef3606e32020-09-22 08:18:24 -06001440static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
1441{
1442 struct io_ring_ctx *ctx = req->ctx;
1443
1444 if (!tsk || req->task == tsk)
1445 return true;
Jens Axboe534ca6d2020-09-02 13:52:19 -06001446 if (ctx->flags & IORING_SETUP_SQPOLL) {
1447 if (ctx->sq_data && req->task == ctx->sq_data->thread)
1448 return true;
1449 }
Jens Axboef3606e32020-09-22 08:18:24 -06001450 return false;
1451}
1452
Jens Axboe76e1b642020-09-26 15:05:03 -06001453/*
1454 * Returns true if we found and killed one or more timeouts
1455 */
1456static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
Jens Axboe5262f562019-09-17 12:26:57 -06001457{
1458 struct io_kiocb *req, *tmp;
Jens Axboe76e1b642020-09-26 15:05:03 -06001459 int canceled = 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001460
1461 spin_lock_irq(&ctx->completion_lock);
Jens Axboef3606e32020-09-22 08:18:24 -06001462 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Jens Axboe76e1b642020-09-26 15:05:03 -06001463 if (io_task_match(req, tsk)) {
Jens Axboef3606e32020-09-22 08:18:24 -06001464 io_kill_timeout(req);
Jens Axboe76e1b642020-09-26 15:05:03 -06001465 canceled++;
1466 }
Jens Axboef3606e32020-09-22 08:18:24 -06001467 }
Jens Axboe5262f562019-09-17 12:26:57 -06001468 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe76e1b642020-09-26 15:05:03 -06001469 return canceled != 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001470}
1471
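/*
 * Re-queue deferred (drained) requests to io-wq once the requests they were
 * waiting on have completed, stopping at the first entry that still needs to
 * stay deferred.
 */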
Pavel Begunkov04518942020-05-26 20:34:05 +03001472static void __io_queue_deferred(struct io_ring_ctx *ctx)
1473{
1474 do {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001475 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1476 struct io_defer_entry, list);
Jens Axboe7271ef32020-08-10 09:55:22 -06001477 struct io_kiocb *link;
Pavel Begunkov04518942020-05-26 20:34:05 +03001478
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001479 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001480 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001481 list_del_init(&de->list);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001482 /* punt-init is done before queueing for defer */
Jens Axboe7271ef32020-08-10 09:55:22 -06001483 link = __io_queue_async_work(de->req);
1484 if (link) {
1485 __io_queue_linked_timeout(link);
1486 /* drop submission reference */
Pavel Begunkov216578e2020-10-13 09:44:00 +01001487 io_put_req_deferred(link, 1);
Jens Axboe7271ef32020-08-10 09:55:22 -06001488 }
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001489 kfree(de);
Pavel Begunkov04518942020-05-26 20:34:05 +03001490 } while (!list_empty(&ctx->defer_list));
1491}
1492
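/*
 * Complete timeouts whose target CQ sequence has been reached; stop at the
 * first one that hasn't, or that has no sequence at all.
 */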
Pavel Begunkov360428f2020-05-30 14:54:17 +03001493static void io_flush_timeouts(struct io_ring_ctx *ctx)
1494{
1495 while (!list_empty(&ctx->timeout_list)) {
1496 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001497 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001498
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001499 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001500 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001501 if (req->timeout.target_seq != ctx->cached_cq_tail
1502 - atomic_read(&ctx->cq_timeouts))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001503 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001504
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001505 list_del_init(&req->timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001506 io_kill_timeout(req);
1507 }
1508}
1509
Jens Axboede0617e2019-04-06 21:51:27 -06001510static void io_commit_cqring(struct io_ring_ctx *ctx)
1511{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001512 io_flush_timeouts(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001513 __io_commit_cqring(ctx);
1514
Pavel Begunkov04518942020-05-26 20:34:05 +03001515 if (unlikely(!list_empty(&ctx->defer_list)))
1516 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001517}
1518
Jens Axboe90554202020-09-03 12:12:41 -06001519static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1520{
1521 struct io_rings *r = ctx->rings;
1522
1523 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1524}
1525
Jens Axboe2b188cc2019-01-07 10:46:33 -07001526static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1527{
Hristo Venev75b28af2019-08-26 17:23:46 +00001528 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001529 unsigned tail;
1530
1531 tail = ctx->cached_cq_tail;
Stefan Bühler115e12e2019-04-24 23:54:18 +02001532 /*
1533 * writes to the cq entry need to come after reading head; the
1534 * control dependency is enough as we're using WRITE_ONCE to
1535 * fill the cq entry
1536 */
Hristo Venev75b28af2019-08-26 17:23:46 +00001537 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001538 return NULL;
1539
1540 ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +00001541 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001542}
1543
Jens Axboef2842ab2020-01-08 11:04:00 -07001544static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1545{
Jens Axboef0b493e2020-02-01 21:30:11 -07001546 if (!ctx->cq_ev_fd)
1547 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001548 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1549 return false;
Jens Axboef2842ab2020-01-08 11:04:00 -07001550 if (!ctx->eventfd_async)
1551 return true;
Jens Axboeb41e9852020-02-17 09:52:41 -07001552 return io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001553}
1554
Jens Axboeb41e9852020-02-17 09:52:41 -07001555static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001556{
1557 if (waitqueue_active(&ctx->wait))
1558 wake_up(&ctx->wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001559 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1560 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001561 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001562 eventfd_signal(ctx->cq_ev_fd, 1);
Jens Axboe8c838782019-03-12 15:48:16 -06001563}
1564
Pavel Begunkov46930142020-07-30 18:43:49 +03001565static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
1566{
1567 if (list_empty(&ctx->cq_overflow_list)) {
1568 clear_bit(0, &ctx->sq_check_overflow);
1569 clear_bit(0, &ctx->cq_check_overflow);
1570 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1571 }
1572}
1573
Jens Axboee6c8aa92020-09-28 13:10:13 -06001574static inline bool io_match_files(struct io_kiocb *req,
1575 struct files_struct *files)
1576{
1577 if (!files)
1578 return true;
Jens Axboedfead8a2020-10-14 10:12:37 -06001579 if ((req->flags & REQ_F_WORK_INITIALIZED) &&
1580 (req->work.flags & IO_WQ_WORK_FILES))
Jens Axboe98447d62020-10-14 10:48:51 -06001581 return req->work.identity->files == files;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001582 return false;
1583}
1584
Jens Axboec4a2ed72019-11-21 21:01:26 -07001585/* Returns true if there are no backlogged entries after the flush */
Jens Axboee6c8aa92020-09-28 13:10:13 -06001586static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1587 struct task_struct *tsk,
1588 struct files_struct *files)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001589{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001590 struct io_rings *rings = ctx->rings;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001591 struct io_kiocb *req, *tmp;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001592 struct io_uring_cqe *cqe;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001593 unsigned long flags;
1594 LIST_HEAD(list);
1595
1596 if (!force) {
1597 if (list_empty_careful(&ctx->cq_overflow_list))
Jens Axboec4a2ed72019-11-21 21:01:26 -07001598 return true;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001599 if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1600 rings->cq_ring_entries))
Jens Axboec4a2ed72019-11-21 21:01:26 -07001601 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001602 }
1603
1604 spin_lock_irqsave(&ctx->completion_lock, flags);
1605
1606 /* if force is set, the ring is going away. always drop after that */
1607 if (force)
Jens Axboe69b3e542020-01-08 11:01:46 -07001608 ctx->cq_overflow_flushed = 1;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001609
Jens Axboec4a2ed72019-11-21 21:01:26 -07001610 cqe = NULL;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001611 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
1612 if (tsk && req->task != tsk)
1613 continue;
1614 if (!io_match_files(req, files))
1615 continue;
1616
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001617 cqe = io_get_cqring(ctx);
1618 if (!cqe && !force)
1619 break;
1620
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001621 list_move(&req->compl.list, &list);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001622 if (cqe) {
1623 WRITE_ONCE(cqe->user_data, req->user_data);
1624 WRITE_ONCE(cqe->res, req->result);
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001625 WRITE_ONCE(cqe->flags, req->compl.cflags);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001626 } else {
1627 WRITE_ONCE(ctx->rings->cq_overflow,
1628 atomic_inc_return(&ctx->cached_cq_overflow));
1629 }
1630 }
1631
1632 io_commit_cqring(ctx);
Pavel Begunkov46930142020-07-30 18:43:49 +03001633 io_cqring_mark_overflow(ctx);
1634
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001635 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1636 io_cqring_ev_posted(ctx);
1637
1638 while (!list_empty(&list)) {
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001639 req = list_first_entry(&list, struct io_kiocb, compl.list);
1640 list_del(&req->compl.list);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001641 io_put_req(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001642 }
Jens Axboec4a2ed72019-11-21 21:01:26 -07001643
1644 return cqe != NULL;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001645}
1646
Jens Axboebcda7ba2020-02-23 16:42:51 -07001647static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001648{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001649 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001650 struct io_uring_cqe *cqe;
1651
Jens Axboe78e19bb2019-11-06 15:21:34 -07001652 trace_io_uring_complete(ctx, req->user_data, res);
Jens Axboe51c3ff62019-11-03 06:52:50 -07001653
Jens Axboe2b188cc2019-01-07 10:46:33 -07001654 /*
1655 * If we can't get a cq entry, userspace overflowed the
1656 * submission (by quite a lot). Increment the overflow count in
1657 * the ring.
1658 */
1659 cqe = io_get_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001660 if (likely(cqe)) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001661 WRITE_ONCE(cqe->user_data, req->user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001662 WRITE_ONCE(cqe->res, res);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001663 WRITE_ONCE(cqe->flags, cflags);
Jens Axboe0f212202020-09-13 13:09:39 -06001664 } else if (ctx->cq_overflow_flushed || req->task->io_uring->in_idle) {
1665 /*
1666 * If we're in ring overflow flush mode, or in task cancel mode,
1667 * then we cannot store the request for later flushing, we need
1668 * to drop it on the floor.
1669 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07001670 WRITE_ONCE(ctx->rings->cq_overflow,
1671 atomic_inc_return(&ctx->cached_cq_overflow));
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001672 } else {
Jens Axboead3eb2c2019-12-18 17:12:20 -07001673 if (list_empty(&ctx->cq_overflow_list)) {
1674 set_bit(0, &ctx->sq_check_overflow);
1675 set_bit(0, &ctx->cq_check_overflow);
Xiaoguang Wang6d5f9042020-07-09 09:15:29 +08001676 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
Jens Axboead3eb2c2019-12-18 17:12:20 -07001677 }
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001678 io_clean_op(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001679 req->result = res;
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001680 req->compl.cflags = cflags;
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001681 refcount_inc(&req->refs);
1682 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001683 }
1684}
1685
Jens Axboebcda7ba2020-02-23 16:42:51 -07001686static void io_cqring_fill_event(struct io_kiocb *req, long res)
1687{
1688 __io_cqring_fill_event(req, res, 0);
1689}
1690
Jens Axboee1e16092020-06-22 09:17:17 -06001691static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001692{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001693 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001694 unsigned long flags;
1695
1696 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001697 __io_cqring_fill_event(req, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001698 io_commit_cqring(ctx);
1699 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1700
Jens Axboe8c838782019-03-12 15:48:16 -06001701 io_cqring_ev_posted(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001702}
1703
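/*
 * Flush the completion batch built up during submission: post a CQE for each
 * request under ->completion_lock, then drop the completion reference
 * (dropping the lock first for requests whose teardown may itself need locks).
 */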
Jens Axboe229a7b62020-06-22 10:13:11 -06001704static void io_submit_flush_completions(struct io_comp_state *cs)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001705{
Jens Axboe229a7b62020-06-22 10:13:11 -06001706 struct io_ring_ctx *ctx = cs->ctx;
1707
1708 spin_lock_irq(&ctx->completion_lock);
1709 while (!list_empty(&cs->list)) {
1710 struct io_kiocb *req;
1711
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001712 req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
1713 list_del(&req->compl.list);
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001714 __io_cqring_fill_event(req, req->result, req->compl.cflags);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001715
1716 /*
1717 * io_free_req() doesn't care about completion_lock unless one
1718 * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
1719 * because of a potential deadlock with req->work.fs->lock
1720 */
1721 if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
1722 |REQ_F_WORK_INITIALIZED)) {
Jens Axboe229a7b62020-06-22 10:13:11 -06001723 spin_unlock_irq(&ctx->completion_lock);
1724 io_put_req(req);
1725 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001726 } else {
1727 io_put_req(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06001728 }
1729 }
1730 io_commit_cqring(ctx);
1731 spin_unlock_irq(&ctx->completion_lock);
1732
1733 io_cqring_ev_posted(ctx);
1734 cs->nr = 0;
1735}
1736
1737static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
1738 struct io_comp_state *cs)
1739{
1740 if (!cs) {
1741 io_cqring_add_event(req, res, cflags);
1742 io_put_req(req);
1743 } else {
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001744 io_clean_op(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06001745 req->result = res;
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001746 req->compl.cflags = cflags;
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001747 list_add_tail(&req->compl.list, &cs->list);
Jens Axboe229a7b62020-06-22 10:13:11 -06001748 if (++cs->nr >= 32)
1749 io_submit_flush_completions(cs);
1750 }
Jens Axboee1e16092020-06-22 09:17:17 -06001751}
1752
1753static void io_req_complete(struct io_kiocb *req, long res)
1754{
Jens Axboe229a7b62020-06-22 10:13:11 -06001755 __io_req_complete(req, res, 0, NULL);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001756}
1757
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001758static inline bool io_is_fallback_req(struct io_kiocb *req)
1759{
1760 return req == (struct io_kiocb *)
1761 ((unsigned long) req->ctx->fallback_req & ~1UL);
1762}
1763
1764static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1765{
1766 struct io_kiocb *req;
1767
1768 req = ctx->fallback_req;
Bijan Mottahedehdd461af2020-04-29 17:47:50 -07001769 if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001770 return req;
1771
1772 return NULL;
1773}
1774
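/*
 * Allocate a request, refilling the submit-state cache in bulk when it runs
 * dry; if even a single allocation fails, fall back to the per-ctx
 * pre-allocated fallback request.
 */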
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03001775static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
1776 struct io_submit_state *state)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001777{
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001778 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001779 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2579f912019-01-09 09:10:43 -07001780 size_t sz;
1781 int ret;
1782
1783 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
Jens Axboefd6fab22019-03-14 16:30:06 -06001784 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1785
1786 /*
1787 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1788 * retry single alloc to be on the safe side.
1789 */
1790 if (unlikely(ret <= 0)) {
1791 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1792 if (!state->reqs[0])
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001793 goto fallback;
Jens Axboefd6fab22019-03-14 16:30:06 -06001794 ret = 1;
1795 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03001796 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001797 }
1798
Pavel Begunkov291b2822020-09-30 22:57:01 +03001799 state->free_reqs--;
1800 return state->reqs[state->free_reqs];
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001801fallback:
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03001802 return io_get_fallback_req(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001803}
1804
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001805static inline void io_put_file(struct io_kiocb *req, struct file *file,
1806 bool fixed)
1807{
1808 if (fixed)
Xiaoguang Wang05589552020-03-31 14:05:18 +08001809 percpu_ref_put(req->fixed_file_refs);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001810 else
1811 fput(file);
1812}
1813
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001814static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001815{
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001816 io_clean_op(req);
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001817
Jens Axboee8c2bc12020-08-15 18:44:09 -07001818 if (req->async_data)
1819 kfree(req->async_data);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001820 if (req->file)
1821 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
Jens Axboefcb323c2019-10-24 12:39:47 -06001822
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001823 io_req_clean_work(req);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001824}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001825
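/*
 * Last reference is gone: tear the request down, drop the owning task's
 * inflight count (waking it if it is waiting to go idle), and return the
 * memory to the slab cache or the fallback slot.
 */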
Pavel Begunkov216578e2020-10-13 09:44:00 +01001826static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001827{
Jens Axboe1e6fa522020-10-15 08:46:24 -06001828 struct io_uring_task *tctx = req->task->io_uring;
1829 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov216578e2020-10-13 09:44:00 +01001830
1831 io_dismantle_req(req);
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001832
Jens Axboed8a6df12020-10-15 16:24:45 -06001833 percpu_counter_dec(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06001834 if (tctx->in_idle)
1835 wake_up(&tctx->wait);
Jens Axboee3bc8e92020-09-24 08:45:57 -06001836 put_task_struct(req->task);
1837
Pavel Begunkovb1e50e52020-04-08 08:58:44 +03001838 if (likely(!io_is_fallback_req(req)))
1839 kmem_cache_free(req_cachep, req);
1840 else
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001841 clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
1842 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001843}
1844
Jackie Liua197f662019-11-08 08:09:12 -07001845static bool io_link_cancel_timeout(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001846{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001847 struct io_timeout_data *io = req->async_data;
Jackie Liua197f662019-11-08 08:09:12 -07001848 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07001849 int ret;
1850
Jens Axboee8c2bc12020-08-15 18:44:09 -07001851 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe2665abf2019-11-05 12:40:47 -07001852 if (ret != -1) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001853 io_cqring_fill_event(req, -ECANCELED);
Jens Axboe2665abf2019-11-05 12:40:47 -07001854 io_commit_cqring(ctx);
Pavel Begunkovdea3b492020-04-12 02:05:04 +03001855 req->flags &= ~REQ_F_LINK_HEAD;
Pavel Begunkov216578e2020-10-13 09:44:00 +01001856 io_put_req_deferred(req, 1);
Jens Axboe2665abf2019-11-05 12:40:47 -07001857 return true;
1858 }
1859
1860 return false;
1861}
1862
Jens Axboeab0b6452020-06-30 08:43:15 -06001863static bool __io_kill_linked_timeout(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001864{
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001865 struct io_kiocb *link;
Jens Axboeab0b6452020-06-30 08:43:15 -06001866 bool wake_ev;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001867
1868 if (list_empty(&req->link_list))
Jens Axboeab0b6452020-06-30 08:43:15 -06001869 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001870 link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
1871 if (link->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboeab0b6452020-06-30 08:43:15 -06001872 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001873
1874 list_del_init(&link->link_list);
1875 wake_ev = io_link_cancel_timeout(link);
1876 req->flags &= ~REQ_F_LINK_TIMEOUT;
Jens Axboeab0b6452020-06-30 08:43:15 -06001877 return wake_ev;
1878}
1879
1880static void io_kill_linked_timeout(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001881{
Jens Axboe2665abf2019-11-05 12:40:47 -07001882 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov216578e2020-10-13 09:44:00 +01001883 unsigned long flags;
Jens Axboeab0b6452020-06-30 08:43:15 -06001884 bool wake_ev;
Jens Axboe9e645e112019-05-10 16:07:28 -06001885
Pavel Begunkov216578e2020-10-13 09:44:00 +01001886 spin_lock_irqsave(&ctx->completion_lock, flags);
1887 wake_ev = __io_kill_linked_timeout(req);
1888 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeab0b6452020-06-30 08:43:15 -06001889
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001890 if (wake_ev)
1891 io_cqring_ev_posted(ctx);
1892}
1893
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03001894static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001895{
1896 struct io_kiocb *nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001897
Jens Axboe9e645e112019-05-10 16:07:28 -06001898 /*
1899 * The list should never be empty when we are called here. But it could
1900 * potentially happen if the chain is messed up, so check to be on the
1901 * safe side.
1902 */
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001903 if (unlikely(list_empty(&req->link_list)))
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03001904 return NULL;
Jens Axboe94ae5e72019-11-14 19:39:52 -07001905
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001906 nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
1907 list_del_init(&req->link_list);
1908 if (!list_empty(&nxt->link_list))
1909 nxt->flags |= REQ_F_LINK_HEAD;
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03001910 return nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001911}
1912
1913/*
Pavel Begunkovdea3b492020-04-12 02:05:04 +03001914 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
Jens Axboe9e645e112019-05-10 16:07:28 -06001915 */
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001916static void io_fail_links(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001917{
Jens Axboe2665abf2019-11-05 12:40:47 -07001918 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001919 unsigned long flags;
Jens Axboe9e645e112019-05-10 16:07:28 -06001920
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001921 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboe9e645e112019-05-10 16:07:28 -06001922 while (!list_empty(&req->link_list)) {
Pavel Begunkov44932332019-12-05 16:16:35 +03001923 struct io_kiocb *link = list_first_entry(&req->link_list,
1924 struct io_kiocb, link_list);
Jens Axboe9e645e112019-05-10 16:07:28 -06001925
Pavel Begunkov44932332019-12-05 16:16:35 +03001926 list_del_init(&link->link_list);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001927 trace_io_uring_fail_link(req, link);
Jens Axboe2665abf2019-11-05 12:40:47 -07001928
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001929 io_cqring_fill_event(link, -ECANCELED);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001930
1931 /*
1932 * It's ok to free under spinlock as they're not linked anymore,
1933 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
1934 * work.fs->lock.
1935 */
1936 if (link->flags & REQ_F_WORK_INITIALIZED)
1937 io_put_req_deferred(link, 2);
1938 else
1939 io_double_put_req(link);
Jens Axboe9e645e112019-05-10 16:07:28 -06001940 }
Jens Axboe2665abf2019-11-05 12:40:47 -07001941
1942 io_commit_cqring(ctx);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001943 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001944
Jens Axboe2665abf2019-11-05 12:40:47 -07001945 io_cqring_ev_posted(ctx);
Jens Axboe9e645e112019-05-10 16:07:28 -06001946}
1947
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001948static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001949{
Pavel Begunkov9b0d9112020-06-28 12:52:34 +03001950 req->flags &= ~REQ_F_LINK_HEAD;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001951 if (req->flags & REQ_F_LINK_TIMEOUT)
1952 io_kill_linked_timeout(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07001953
Jens Axboe9e645e112019-05-10 16:07:28 -06001954 /*
1955 * If LINK is set, we have dependent requests in this chain. If we
1956 * didn't fail this request, queue the first one up, moving any other
1957 * dependencies to the next request. In case of failure, fail the rest
1958 * of the chain.
1959 */
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03001960 if (likely(!(req->flags & REQ_F_FAIL_LINK)))
1961 return io_req_link_next(req);
1962 io_fail_links(req);
1963 return NULL;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001964}
Jens Axboe2665abf2019-11-05 12:40:47 -07001965
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001966static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1967{
1968 if (likely(!(req->flags & REQ_F_LINK_HEAD)))
1969 return NULL;
1970 return __io_req_find_next(req);
1971}
1972
Jens Axboe87c43112020-09-30 21:00:14 -06001973static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
Jens Axboec2c4c832020-07-01 15:37:11 -06001974{
1975 struct task_struct *tsk = req->task;
1976 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06001977 int ret, notify;
Jens Axboec2c4c832020-07-01 15:37:11 -06001978
Jens Axboe6200b0a2020-09-13 14:38:30 -06001979 if (tsk->flags & PF_EXITING)
1980 return -ESRCH;
1981
Jens Axboec2c4c832020-07-01 15:37:11 -06001982 /*
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06001983 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1984 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1985 * processing task_work. There's no reliable way to tell if TWA_RESUME
1986 * will do the job.
Jens Axboec2c4c832020-07-01 15:37:11 -06001987 */
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06001988 notify = 0;
Jens Axboefd7d6de2020-08-23 11:00:37 -06001989 if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
Jens Axboec2c4c832020-07-01 15:37:11 -06001990 notify = TWA_SIGNAL;
1991
Jens Axboe87c43112020-09-30 21:00:14 -06001992 ret = task_work_add(tsk, &req->task_work, notify);
Jens Axboec2c4c832020-07-01 15:37:11 -06001993 if (!ret)
1994 wake_up_process(tsk);
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06001995
Jens Axboec2c4c832020-07-01 15:37:11 -06001996 return ret;
1997}
1998
Jens Axboec40f6372020-06-25 15:39:59 -06001999static void __io_req_task_cancel(struct io_kiocb *req, int error)
2000{
2001 struct io_ring_ctx *ctx = req->ctx;
2002
2003 spin_lock_irq(&ctx->completion_lock);
2004 io_cqring_fill_event(req, error);
2005 io_commit_cqring(ctx);
2006 spin_unlock_irq(&ctx->completion_lock);
2007
2008 io_cqring_ev_posted(ctx);
2009 req_set_fail_links(req);
2010 io_double_put_req(req);
2011}
2012
2013static void io_req_task_cancel(struct callback_head *cb)
2014{
2015 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002016 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002017
2018 __io_req_task_cancel(req, -ECANCELED);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002019 percpu_ref_put(&ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002020}
2021
2022static void __io_req_task_submit(struct io_kiocb *req)
2023{
2024 struct io_ring_ctx *ctx = req->ctx;
2025
Jens Axboec40f6372020-06-25 15:39:59 -06002026 if (!__io_sq_thread_acquire_mm(ctx)) {
2027 mutex_lock(&ctx->uring_lock);
Pavel Begunkovc1379e22020-09-30 22:57:56 +03002028 __io_queue_sqe(req, NULL);
Jens Axboec40f6372020-06-25 15:39:59 -06002029 mutex_unlock(&ctx->uring_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07002030 } else {
Jens Axboec40f6372020-06-25 15:39:59 -06002031 __io_req_task_cancel(req, -EFAULT);
Jens Axboe2665abf2019-11-05 12:40:47 -07002032 }
Jens Axboe9e645e112019-05-10 16:07:28 -06002033}
2034
Jens Axboec40f6372020-06-25 15:39:59 -06002035static void io_req_task_submit(struct callback_head *cb)
2036{
2037 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06002038 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002039
2040 __io_req_task_submit(req);
Jens Axboe6d816e02020-08-11 08:04:14 -06002041 percpu_ref_put(&ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002042}
2043
2044static void io_req_task_queue(struct io_kiocb *req)
2045{
Jens Axboec40f6372020-06-25 15:39:59 -06002046 int ret;
2047
2048 init_task_work(&req->task_work, io_req_task_submit);
Jens Axboe6d816e02020-08-11 08:04:14 -06002049 percpu_ref_get(&req->ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002050
Jens Axboe87c43112020-09-30 21:00:14 -06002051 ret = io_req_task_work_add(req, true);
Jens Axboec40f6372020-06-25 15:39:59 -06002052 if (unlikely(ret)) {
Jens Axboec2c4c832020-07-01 15:37:11 -06002053 struct task_struct *tsk;
2054
Jens Axboec40f6372020-06-25 15:39:59 -06002055 init_task_work(&req->task_work, io_req_task_cancel);
2056 tsk = io_wq_get_task(req->ctx->io_wq);
Jens Axboec2c4c832020-07-01 15:37:11 -06002057 task_work_add(tsk, &req->task_work, 0);
2058 wake_up_process(tsk);
Jens Axboec40f6372020-06-25 15:39:59 -06002059 }
Jens Axboec40f6372020-06-25 15:39:59 -06002060}
2061
Pavel Begunkovc3524382020-06-28 12:52:32 +03002062static void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002063{
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03002064 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002065
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002066 if (nxt)
2067 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002068}
2069
Jens Axboe9e645e112019-05-10 16:07:28 -06002070static void io_free_req(struct io_kiocb *req)
2071{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002072 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002073 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002074}
2075
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002076struct req_batch {
2077 void *reqs[IO_IOPOLL_BATCH];
2078 int to_free;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002079
2080 struct task_struct *task;
2081 int task_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002082};
2083
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002084static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002085{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002086 rb->to_free = 0;
2087 rb->task_refs = 0;
2088 rb->task = NULL;
2089}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002090
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002091static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
2092 struct req_batch *rb)
2093{
2094 kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
2095 percpu_ref_put_many(&ctx->refs, rb->to_free);
2096 rb->to_free = 0;
2097}
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002098
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002099static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2100 struct req_batch *rb)
2101{
2102 if (rb->to_free)
2103 __io_req_free_batch_flush(ctx, rb);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002104 if (rb->task) {
Jens Axboed8a6df12020-10-15 16:24:45 -06002105 struct io_uring_task *tctx = rb->task->io_uring;
2106
2107 percpu_counter_sub(&tctx->inflight, rb->task_refs);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002108 put_task_struct_many(rb->task, rb->task_refs);
2109 rb->task = NULL;
2110 }
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002111}
2112
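/*
 * Batched variant of io_free_req(): queue any linked follow-up request, fold
 * the owning task's inflight accounting, and collect the request for a bulk
 * kmem_cache free.
 */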
2113static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
2114{
2115 if (unlikely(io_is_fallback_req(req))) {
2116 io_free_req(req);
2117 return;
2118 }
2119 if (req->flags & REQ_F_LINK_HEAD)
2120 io_queue_next(req);
2121
Jens Axboee3bc8e92020-09-24 08:45:57 -06002122 if (req->task != rb->task) {
Jens Axboe0f212202020-09-13 13:09:39 -06002123 if (rb->task) {
Jens Axboed8a6df12020-10-15 16:24:45 -06002124 struct io_uring_task *tctx = rb->task->io_uring;
2125
2126 percpu_counter_sub(&tctx->inflight, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002127 put_task_struct_many(rb->task, rb->task_refs);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002128 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002129 rb->task = req->task;
2130 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002131 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002132 rb->task_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002133
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002134 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002135 rb->reqs[rb->to_free++] = req;
2136 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
2137 __io_req_free_batch_flush(req->ctx, rb);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002138}
2139
Jens Axboeba816ad2019-09-28 11:36:45 -06002140/*
2141 * Drop reference to request, return next in chain (if there is one) if this
2142 * was the last reference to this request.
2143 */
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03002144static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002145{
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03002146 struct io_kiocb *nxt = NULL;
2147
Jens Axboe2a44f462020-02-25 13:25:41 -07002148 if (refcount_dec_and_test(&req->refs)) {
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03002149 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002150 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002151 }
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03002152 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002153}
2154
Jens Axboe2b188cc2019-01-07 10:46:33 -07002155static void io_put_req(struct io_kiocb *req)
2156{
Jens Axboedef596e2019-01-09 08:59:42 -07002157 if (refcount_dec_and_test(&req->refs))
2158 io_free_req(req);
2159}
2160
Pavel Begunkov216578e2020-10-13 09:44:00 +01002161static void io_put_req_deferred_cb(struct callback_head *cb)
2162{
2163 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2164
2165 io_free_req(req);
2166}
2167
2168static void io_free_req_deferred(struct io_kiocb *req)
2169{
2170 int ret;
2171
2172 init_task_work(&req->task_work, io_put_req_deferred_cb);
2173 ret = io_req_task_work_add(req, true);
2174 if (unlikely(ret)) {
2175 struct task_struct *tsk;
2176
2177 tsk = io_wq_get_task(req->ctx->io_wq);
2178 task_work_add(tsk, &req->task_work, 0);
2179 wake_up_process(tsk);
2180 }
2181}
2182
2183static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2184{
2185 if (refcount_sub_and_test(refs, &req->refs))
2186 io_free_req_deferred(req);
2187}
2188
Pavel Begunkovf4db7182020-06-25 18:20:54 +03002189static struct io_wq_work *io_steal_work(struct io_kiocb *req)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002190{
Pavel Begunkov6df1db62020-07-03 22:15:06 +03002191 struct io_kiocb *nxt;
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002192
Pavel Begunkovf4db7182020-06-25 18:20:54 +03002193 /*
2194 * A ref is owned by io-wq, in whose context we are running. So if that's
2195 * the last one, it's safe to steal the next work item. False negatives are
2196 * OK; it will just be re-punted async in io_put_work().
2197 */
2198 if (refcount_read(&req->refs) != 1)
2199 return NULL;
2200
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03002201 nxt = io_req_find_next(req);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03002202 return nxt ? &nxt->work : NULL;
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002203}
2204
Jens Axboe978db572019-11-14 22:39:04 -07002205static void io_double_put_req(struct io_kiocb *req)
2206{
2207 /* drop both submit and complete references */
2208 if (refcount_sub_and_test(2, &req->refs))
2209 io_free_req(req);
2210}
2211
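/*
 * Number of CQEs the application has yet to consume. If the CQ has
 * overflowed, flush the backlog first unless 'noflush' says this context
 * can't do that safely, in which case -1U is returned so waiters are woken.
 */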
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002212static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
Jens Axboea3a0e432019-08-20 11:03:11 -06002213{
Jens Axboe84f97dc2019-11-06 11:27:53 -07002214 struct io_rings *rings = ctx->rings;
2215
Jens Axboead3eb2c2019-12-18 17:12:20 -07002216 if (test_bit(0, &ctx->cq_check_overflow)) {
2217 /*
2218 * noflush == true is from the waitqueue handler, just ensure
2219 * we wake up the task, and the next invocation will flush the
2220 * entries. We cannot safely do it from here.
2221 */
2222 if (noflush && !list_empty(&ctx->cq_overflow_list))
2223 return -1U;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002224
Jens Axboee6c8aa92020-09-28 13:10:13 -06002225 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Jens Axboead3eb2c2019-12-18 17:12:20 -07002226 }
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002227
Jens Axboea3a0e432019-08-20 11:03:11 -06002228 /* See comment at the top of this file */
2229 smp_rmb();
Jens Axboead3eb2c2019-12-18 17:12:20 -07002230 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
Jens Axboea3a0e432019-08-20 11:03:11 -06002231}
2232
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002233static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2234{
2235 struct io_rings *rings = ctx->rings;
2236
2237 /* make sure SQ entry isn't read before tail */
2238 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2239}
2240
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002241static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002242{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002243 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002244
Jens Axboebcda7ba2020-02-23 16:42:51 -07002245 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2246 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002247 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002248 kfree(kbuf);
2249 return cflags;
2250}
2251
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002252static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2253{
2254 struct io_buffer *kbuf;
2255
2256 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2257 return io_put_kbuf(req, kbuf);
2258}
2259
Jens Axboe4c6e2772020-07-01 11:29:10 -06002260static inline bool io_run_task_work(void)
2261{
Jens Axboe6200b0a2020-09-13 14:38:30 -06002262 /*
2263 * Not safe to run on exiting task, and the task_work handling will
2264 * not add work to such a task.
2265 */
2266 if (unlikely(current->flags & PF_EXITING))
2267 return false;
Jens Axboe4c6e2772020-07-01 11:29:10 -06002268 if (current->task_works) {
2269 __set_current_state(TASK_RUNNING);
2270 task_work_run();
2271 return true;
2272 }
2273
2274 return false;
2275}
2276
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002277static void io_iopoll_queue(struct list_head *again)
2278{
2279 struct io_kiocb *req;
2280
2281 do {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002282 req = list_first_entry(again, struct io_kiocb, inflight_entry);
2283 list_del(&req->inflight_entry);
Pavel Begunkov81b68a52020-07-30 18:43:46 +03002284 __io_complete_rw(req, -EAGAIN, 0, NULL);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002285 } while (!list_empty(again));
2286}
2287
Jens Axboedef596e2019-01-09 08:59:42 -07002288/*
2289 * Find and free completed poll iocbs
2290 */
2291static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2292 struct list_head *done)
2293{
Jens Axboe8237e042019-12-28 10:48:22 -07002294 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002295 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002296 LIST_HEAD(again);
2297
2298 /* order with ->result store in io_complete_rw_iopoll() */
2299 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002300
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002301 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002302 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002303 int cflags = 0;
2304
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002305 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002306 if (READ_ONCE(req->result) == -EAGAIN) {
Jens Axboe56450c22020-08-26 18:58:26 -06002307 req->result = 0;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002308 req->iopoll_completed = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002309 list_move_tail(&req->inflight_entry, &again);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002310 continue;
2311 }
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002312 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002313
Jens Axboebcda7ba2020-02-23 16:42:51 -07002314 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002315 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002316
2317 __io_cqring_fill_event(req, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002318 (*nr_events)++;
2319
Pavel Begunkovc3524382020-06-28 12:52:32 +03002320 if (refcount_dec_and_test(&req->refs))
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002321 io_req_free_batch(&rb, req);
Jens Axboedef596e2019-01-09 08:59:42 -07002322 }
Jens Axboedef596e2019-01-09 08:59:42 -07002323
Jens Axboe09bb8392019-03-13 12:39:28 -06002324 io_commit_cqring(ctx);
Xiaoguang Wang32b22442020-03-11 09:26:09 +08002325 if (ctx->flags & IORING_SETUP_SQPOLL)
2326 io_cqring_ev_posted(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002327 io_req_free_batch_finish(ctx, &rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002328
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002329 if (!list_empty(&again))
2330 io_iopoll_queue(&again);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002331}
2332
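/*
 * Single pass of the IOPOLL reaping loop: poll the driver for each inflight
 * iocb, collect completed entries on a local 'done' list, and hand them to
 * io_iopoll_complete() to post CQEs (requeueing any that came back -EAGAIN).
 */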
Jens Axboedef596e2019-01-09 08:59:42 -07002333static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2334 long min)
2335{
2336 struct io_kiocb *req, *tmp;
2337 LIST_HEAD(done);
2338 bool spin;
2339 int ret;
2340
2341 /*
2342 * Only spin for completions if we don't have multiple devices hanging
2343 * off our complete list, and we're under the requested amount.
2344 */
2345 spin = !ctx->poll_multi_file && *nr_events < min;
2346
2347 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002348 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002349 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002350
2351 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002352 * Move completed and retryable entries to our local lists.
2353 * If we find a request that requires polling, break out
2354 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002355 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002356 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002357 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002358 continue;
2359 }
2360 if (!list_empty(&done))
2361 break;
2362
2363 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2364 if (ret < 0)
2365 break;
2366
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002367 /* iopoll may have completed current req */
2368 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002369 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002370
Jens Axboedef596e2019-01-09 08:59:42 -07002371 if (ret && spin)
2372 spin = false;
2373 ret = 0;
2374 }
2375
2376 if (!list_empty(&done))
2377 io_iopoll_complete(ctx, nr_events, &done);
2378
2379 return ret;
2380}
2381
2382/*
Brian Gianforcarod195a662019-12-13 03:09:50 -08002383 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
Jens Axboedef596e2019-01-09 08:59:42 -07002384 * non-spinning poll check - we'll still enter the driver poll loop, but only
2385 * as a non-spinning completion check.
2386 */
2387static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2388 long min)
2389{
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002390 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -07002391 int ret;
2392
2393 ret = io_do_iopoll(ctx, nr_events, min);
2394 if (ret < 0)
2395 return ret;
Pavel Begunkoveba0a4d2020-07-06 17:59:30 +03002396 if (*nr_events >= min)
Jens Axboedef596e2019-01-09 08:59:42 -07002397 return 0;
2398 }
2399
2400 return 1;
2401}
2402
2403/*
2404 * We can't just wait for polled events to come to us, we have to actively
2405 * find and complete them.
2406 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002407static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002408{
2409 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2410 return;
2411
2412 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002413 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002414 unsigned int nr_events = 0;
2415
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002416 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002417
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002418 /* let it sleep and repeat later if can't complete a request */
2419 if (nr_events == 0)
2420 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002421 /*
2422 * Ensure we allow local-to-the-cpu processing to take place,
2423 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002424 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002425 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002426 if (need_resched()) {
2427 mutex_unlock(&ctx->uring_lock);
2428 cond_resched();
2429 mutex_lock(&ctx->uring_lock);
2430 }
Jens Axboedef596e2019-01-09 08:59:42 -07002431 }
2432 mutex_unlock(&ctx->uring_lock);
2433}
2434
Pavel Begunkov7668b922020-07-07 16:36:21 +03002435static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002436{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002437 unsigned int nr_events = 0;
Jens Axboe2b2ed972019-10-25 10:06:15 -06002438 int iters = 0, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002439
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002440 /*
2441 * We disallow the app entering submit/complete with polling, but we
2442 * still need to lock the ring to prevent racing with polled issue
2443 * that got punted to a workqueue.
2444 */
2445 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002446 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002447 /*
Jens Axboea3a0e432019-08-20 11:03:11 -06002448 * Don't enter poll loop if we already have events pending.
2449 * If we do, we can potentially be spinning for commands that
2450 * already triggered a CQE (eg in error).
2451 */
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002452 if (io_cqring_events(ctx, false))
Jens Axboea3a0e432019-08-20 11:03:11 -06002453 break;
2454
2455 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -06002456 * If a submit got punted to a workqueue, we can have the
2457 * application entering polling for a command before it gets
2458 * issued. That app will hold the uring_lock for the duration
2459 * of the poll right here, so we need to take a breather every
2460 * now and then to ensure that the issue has a chance to add
2461 * the poll to the issued list. Otherwise we can spin here
2462 * forever, while the workqueue is stuck trying to acquire the
2463 * very same mutex.
2464 */
2465 if (!(++iters & 7)) {
2466 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002467 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002468 mutex_lock(&ctx->uring_lock);
2469 }
2470
Pavel Begunkov7668b922020-07-07 16:36:21 +03002471 ret = io_iopoll_getevents(ctx, &nr_events, min);
Jens Axboedef596e2019-01-09 08:59:42 -07002472 if (ret <= 0)
2473 break;
2474 ret = 0;
Pavel Begunkov7668b922020-07-07 16:36:21 +03002475 } while (min && !nr_events && !need_resched());
Jens Axboedef596e2019-01-09 08:59:42 -07002476
Jens Axboe500f9fb2019-08-19 12:15:59 -06002477 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002478 return ret;
2479}
2480
Jens Axboe491381ce2019-10-17 09:20:46 -06002481static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002482{
Jens Axboe491381ce2019-10-17 09:20:46 -06002483 /*
2484 * Tell lockdep we inherited freeze protection from submission
2485 * thread.
2486 */
2487 if (req->flags & REQ_F_ISREG) {
2488 struct inode *inode = file_inode(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002489
Jens Axboe491381ce2019-10-17 09:20:46 -06002490 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002491 }
Jens Axboe491381ce2019-10-17 09:20:46 -06002492 file_end_write(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002493}
2494
Jens Axboea1d7c392020-06-22 11:09:46 -06002495static void io_complete_rw_common(struct kiocb *kiocb, long res,
2496 struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002497{
Jens Axboe9adbd452019-12-20 08:45:55 -07002498 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002499 int cflags = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002500
Jens Axboe491381ce2019-10-17 09:20:46 -06002501 if (kiocb->ki_flags & IOCB_WRITE)
2502 kiocb_end_write(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002503
Jens Axboe4e88d6e2019-12-07 20:59:47 -07002504 if (res != req->result)
2505 req_set_fail_links(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002506 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002507 cflags = io_put_rw_kbuf(req);
Jens Axboea1d7c392020-06-22 11:09:46 -06002508 __io_req_complete(req, res, cflags, cs);
Jens Axboeba816ad2019-09-28 11:36:45 -06002509}
2510
Jens Axboeb63534c2020-06-04 11:28:00 -06002511#ifdef CONFIG_BLOCK
2512static bool io_resubmit_prep(struct io_kiocb *req, int error)
2513{
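	/*
	 * Re-import the iovec and set up async state so this read/write can
	 * be requeued after a transient failure. Returns true if the request
	 * is ready to be resubmitted, false if it was completed with an
	 * error instead.
	 */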
2514 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2515 ssize_t ret = -ECANCELED;
2516 struct iov_iter iter;
2517 int rw;
2518
2519 if (error) {
2520 ret = error;
2521 goto end_req;
2522 }
2523
2524 switch (req->opcode) {
2525 case IORING_OP_READV:
2526 case IORING_OP_READ_FIXED:
2527 case IORING_OP_READ:
2528 rw = READ;
2529 break;
2530 case IORING_OP_WRITEV:
2531 case IORING_OP_WRITE_FIXED:
2532 case IORING_OP_WRITE:
2533 rw = WRITE;
2534 break;
2535 default:
2536 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2537 req->opcode);
2538 goto end_req;
2539 }
2540
Jens Axboee8c2bc12020-08-15 18:44:09 -07002541 if (!req->async_data) {
Jens Axboe8f3d7492020-09-14 09:28:14 -06002542 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2543 if (ret < 0)
2544 goto end_req;
2545 ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
2546 if (!ret)
2547 return true;
2548 kfree(iovec);
2549 } else {
Jens Axboeb63534c2020-06-04 11:28:00 -06002550 return true;
Jens Axboe8f3d7492020-09-14 09:28:14 -06002551 }
Jens Axboeb63534c2020-06-04 11:28:00 -06002552end_req:
Jens Axboeb63534c2020-06-04 11:28:00 -06002553 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06002554 io_req_complete(req, ret);
Jens Axboeb63534c2020-06-04 11:28:00 -06002555 return false;
2556}
Jens Axboeb63534c2020-06-04 11:28:00 -06002557#endif
2558
2559static bool io_rw_reissue(struct io_kiocb *req, long res)
2560{
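	/*
	 * Called from the completion path for regular files and block
	 * devices: instead of failing a -EAGAIN/-EOPNOTSUPP result, re-prep
	 * the request and punt it to io-wq for a blocking retry. Returns
	 * true if the retry was queued.
	 */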
2561#ifdef CONFIG_BLOCK
Jens Axboe355afae2020-09-02 09:30:31 -06002562 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboeb63534c2020-06-04 11:28:00 -06002563 int ret;
2564
Jens Axboe355afae2020-09-02 09:30:31 -06002565 if (!S_ISBLK(mode) && !S_ISREG(mode))
2566 return false;
Jens Axboeb63534c2020-06-04 11:28:00 -06002567 if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
2568 return false;
2569
Jens Axboefdee9462020-08-27 16:46:24 -06002570 ret = io_sq_thread_acquire_mm(req->ctx, req);
Jens Axboe6d816e02020-08-11 08:04:14 -06002571
Jens Axboefdee9462020-08-27 16:46:24 -06002572 if (io_resubmit_prep(req, ret)) {
2573 refcount_inc(&req->refs);
2574 io_queue_async_work(req);
Jens Axboeb63534c2020-06-04 11:28:00 -06002575 return true;
Jens Axboefdee9462020-08-27 16:46:24 -06002576 }
2577
Jens Axboeb63534c2020-06-04 11:28:00 -06002578#endif
2579 return false;
2580}
2581
Jens Axboea1d7c392020-06-22 11:09:46 -06002582static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2583 struct io_comp_state *cs)
2584{
2585 if (!io_rw_reissue(req, res))
2586 io_complete_rw_common(&req->rw.kiocb, res, cs);
Jens Axboeba816ad2019-09-28 11:36:45 -06002587}
2588
2589static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2590{
Jens Axboe9adbd452019-12-20 08:45:55 -07002591 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002592
Jens Axboea1d7c392020-06-22 11:09:46 -06002593 __io_complete_rw(req, res, res2, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002594}
2595
Jens Axboedef596e2019-01-09 08:59:42 -07002596static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2597{
Jens Axboe9adbd452019-12-20 08:45:55 -07002598 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002599
Jens Axboe491381ce2019-10-17 09:20:46 -06002600 if (kiocb->ki_flags & IOCB_WRITE)
2601 kiocb_end_write(req);
Jens Axboedef596e2019-01-09 08:59:42 -07002602
Xiaoguang Wang2d7d6792020-06-16 02:06:37 +08002603 if (res != -EAGAIN && res != req->result)
Jens Axboe4e88d6e2019-12-07 20:59:47 -07002604 req_set_fail_links(req);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002605
2606 WRITE_ONCE(req->result, res);
2607 /* order with io_poll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002608 smp_wmb();
2609 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002610}
2611
2612/*
2613 * After the iocb has been issued, it's safe to be found on the poll list.
2614 * Adding the kiocb to the list AFTER submission ensures that we don't
 2615 * find it from an io_iopoll_getevents() thread before the issuer is done
2616 * accessing the kiocb cookie.
2617 */
2618static void io_iopoll_req_issued(struct io_kiocb *req)
2619{
2620 struct io_ring_ctx *ctx = req->ctx;
2621
2622 /*
2623 * Track whether we have multiple files in our lists. This will impact
2624 * how we do polling eventually, not spinning if we're on potentially
2625 * different devices.
2626 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002627 if (list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002628 ctx->poll_multi_file = false;
2629 } else if (!ctx->poll_multi_file) {
2630 struct io_kiocb *list_req;
2631
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002632 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002633 inflight_entry);
Jens Axboe9adbd452019-12-20 08:45:55 -07002634 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07002635 ctx->poll_multi_file = true;
2636 }
2637
2638 /*
2639 * For fast devices, IO may have already completed. If it has, add
2640 * it to the front so we find it first.
2641 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002642 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002643 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002644 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002645 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002646
2647 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
Jens Axboe534ca6d2020-09-02 13:52:19 -06002648 wq_has_sleeper(&ctx->sq_data->wait))
2649 wake_up(&ctx->sq_data->wait);
Jens Axboedef596e2019-01-09 08:59:42 -07002650}
2651
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002652static void __io_state_file_put(struct io_submit_state *state)
Jens Axboe9a56a232019-01-09 09:06:50 -07002653{
Pavel Begunkov06ef3602020-07-16 23:28:33 +03002654 if (state->has_refs)
2655 fput_many(state->file, state->has_refs);
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002656 state->file = NULL;
2657}
2658
2659static inline void io_state_file_put(struct io_submit_state *state)
2660{
2661 if (state->file)
2662 __io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002663}
2664
2665/*
2666 * Get as many references to a file as we have IOs left in this submission,
2667 * assuming most submissions are for one file, or at least that each file
2668 * has more than one submission.
2669 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002670static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002671{
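	/*
	 * Batch file lookups across a submission: the first SQE for a given
	 * fd takes ios_left references via fget_many(), and later SQEs in
	 * the same batch reuse the cached file, consuming one reference each.
	 */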
2672 if (!state)
2673 return fget(fd);
2674
2675 if (state->file) {
2676 if (state->fd == fd) {
Pavel Begunkov06ef3602020-07-16 23:28:33 +03002677 state->has_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002678 return state->file;
2679 }
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002680 __io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002681 }
2682 state->file = fget_many(fd, state->ios_left);
2683 if (!state->file)
2684 return NULL;
2685
2686 state->fd = fd;
Pavel Begunkov71b547c2020-10-10 18:34:09 +01002687 state->has_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002688 return state->file;
2689}
2690
Jens Axboe4503b762020-06-01 10:00:27 -06002691static bool io_bdev_nowait(struct block_device *bdev)
2692{
2693#ifdef CONFIG_BLOCK
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002694 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002695#else
2696 return true;
2697#endif
2698}
2699
Jens Axboe2b188cc2019-01-07 10:46:33 -07002700/*
2701 * If we tracked the file through the SCM inflight mechanism, we could support
2702 * any file. For now, just ensure that anything potentially problematic is done
2703 * inline.
2704 */
Jens Axboeaf197f52020-04-28 13:15:06 -06002705static bool io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002706{
2707 umode_t mode = file_inode(file)->i_mode;
2708
Jens Axboe4503b762020-06-01 10:00:27 -06002709 if (S_ISBLK(mode)) {
2710 if (io_bdev_nowait(file->f_inode->i_bdev))
2711 return true;
2712 return false;
2713 }
2714 if (S_ISCHR(mode) || S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002715 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002716 if (S_ISREG(mode)) {
2717 if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2718 file->f_op != &io_uring_fops)
2719 return true;
2720 return false;
2721 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002722
Jens Axboec5b85622020-06-09 19:23:05 -06002723 /* any ->read/write should understand O_NONBLOCK */
2724 if (file->f_flags & O_NONBLOCK)
2725 return true;
2726
Jens Axboeaf197f52020-04-28 13:15:06 -06002727 if (!(file->f_mode & FMODE_NOWAIT))
2728 return false;
2729
2730 if (rw == READ)
2731 return file->f_op->read_iter != NULL;
2732
2733 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002734}
2735
Pavel Begunkova88fc402020-09-30 22:57:53 +03002736static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002737{
Jens Axboedef596e2019-01-09 08:59:42 -07002738 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002739 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboe09bb8392019-03-13 12:39:28 -06002740 unsigned ioprio;
2741 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002742
Jens Axboe491381ce2019-10-17 09:20:46 -06002743 if (S_ISREG(file_inode(req->file)->i_mode))
2744 req->flags |= REQ_F_ISREG;
2745
Jens Axboe2b188cc2019-01-07 10:46:33 -07002746 kiocb->ki_pos = READ_ONCE(sqe->off);
Jens Axboeba042912019-12-25 16:33:42 -07002747 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2748 req->flags |= REQ_F_CUR_POS;
2749 kiocb->ki_pos = req->file->f_pos;
2750 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002751 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002752 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2753 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2754 if (unlikely(ret))
2755 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002756
2757 ioprio = READ_ONCE(sqe->ioprio);
2758 if (ioprio) {
2759 ret = ioprio_check_cap(ioprio);
2760 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002761 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002762
2763 kiocb->ki_ioprio = ioprio;
2764 } else
2765 kiocb->ki_ioprio = get_current_ioprio();
2766
Stefan Bühler8449eed2019-04-27 20:34:19 +02002767 /* don't allow async punt if RWF_NOWAIT was requested */
Jens Axboec5b85622020-06-09 19:23:05 -06002768 if (kiocb->ki_flags & IOCB_NOWAIT)
Stefan Bühler8449eed2019-04-27 20:34:19 +02002769 req->flags |= REQ_F_NOWAIT;
2770
Jens Axboedef596e2019-01-09 08:59:42 -07002771 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002772 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2773 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002774 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002775
Jens Axboedef596e2019-01-09 08:59:42 -07002776 kiocb->ki_flags |= IOCB_HIPRI;
2777 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002778 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002779 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002780 if (kiocb->ki_flags & IOCB_HIPRI)
2781 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002782 kiocb->ki_complete = io_complete_rw;
2783 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002784
Jens Axboe3529d8c2019-12-19 18:24:38 -07002785 req->rw.addr = READ_ONCE(sqe->addr);
2786 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002787 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002788 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002789}
2790
2791static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2792{
2793 switch (ret) {
2794 case -EIOCBQUEUED:
2795 break;
2796 case -ERESTARTSYS:
2797 case -ERESTARTNOINTR:
2798 case -ERESTARTNOHAND:
2799 case -ERESTART_RESTARTBLOCK:
2800 /*
2801 * We can't just restart the syscall, since previously
2802 * submitted sqes may already be in progress. Just fail this
2803 * IO with EINTR.
2804 */
2805 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002806 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002807 default:
2808 kiocb->ki_complete(kiocb, ret, 0);
2809 }
2810}
2811
Jens Axboea1d7c392020-06-22 11:09:46 -06002812static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2813 struct io_comp_state *cs)
Jens Axboeba816ad2019-09-28 11:36:45 -06002814{
Jens Axboeba042912019-12-25 16:33:42 -07002815 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002816 struct io_async_rw *io = req->async_data;
Jens Axboeba042912019-12-25 16:33:42 -07002817
Jens Axboe227c0c92020-08-13 11:51:40 -06002818 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002819 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002820 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002821 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002822 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002823 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002824 }
2825
Jens Axboeba042912019-12-25 16:33:42 -07002826 if (req->flags & REQ_F_CUR_POS)
2827 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovbcaec082020-02-24 11:30:18 +03002828 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
Jens Axboea1d7c392020-06-22 11:09:46 -06002829 __io_complete_rw(req, ret, 0, cs);
Jens Axboeba816ad2019-09-28 11:36:45 -06002830 else
2831 io_rw_done(kiocb, ret);
2832}
2833
Jens Axboe9adbd452019-12-20 08:45:55 -07002834static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
Pavel Begunkov7d009162019-11-25 23:14:40 +03002835 struct iov_iter *iter)
Jens Axboeedafcce2019-01-09 09:16:05 -07002836{
Jens Axboe9adbd452019-12-20 08:45:55 -07002837 struct io_ring_ctx *ctx = req->ctx;
2838 size_t len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002839 struct io_mapped_ubuf *imu;
Pavel Begunkov4be1c612020-09-06 00:45:48 +03002840 u16 index, buf_index = req->buf_index;
Jens Axboeedafcce2019-01-09 09:16:05 -07002841 size_t offset;
2842 u64 buf_addr;
2843
Jens Axboeedafcce2019-01-09 09:16:05 -07002844 if (unlikely(buf_index >= ctx->nr_user_bufs))
2845 return -EFAULT;
Jens Axboeedafcce2019-01-09 09:16:05 -07002846 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2847 imu = &ctx->user_bufs[index];
Jens Axboe9adbd452019-12-20 08:45:55 -07002848 buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002849
2850 /* overflow */
2851 if (buf_addr + len < buf_addr)
2852 return -EFAULT;
2853 /* not inside the mapped region */
2854 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2855 return -EFAULT;
2856
2857 /*
 2858	 * May not be the start of the buffer; set the size appropriately
 2859	 * and advance to the beginning.
2860 */
2861 offset = buf_addr - imu->ubuf;
2862 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002863
2864 if (offset) {
2865 /*
2866 * Don't use iov_iter_advance() here, as it's really slow for
2867 * using the latter parts of a big fixed buffer - it iterates
2868 * over each segment manually. We can cheat a bit here, because
2869 * we know that:
2870 *
2871 * 1) it's a BVEC iter, we set it up
2872 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2873 * first and last bvec
2874 *
2875 * So just find our index, and adjust the iterator afterwards.
2876 * If the offset is within the first bvec (or the whole first
 2877		 * bvec), just use iov_iter_advance(). This makes it easier
2878 * since we can just skip the first segment, which may not
2879 * be PAGE_SIZE aligned.
2880 */
2881 const struct bio_vec *bvec = imu->bvec;
2882
2883 if (offset <= bvec->bv_len) {
2884 iov_iter_advance(iter, offset);
2885 } else {
2886 unsigned long seg_skip;
2887
2888 /* skip first vec */
2889 offset -= bvec->bv_len;
2890 seg_skip = 1 + (offset >> PAGE_SHIFT);
2891
2892 iter->bvec = bvec + seg_skip;
2893 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002894 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002895 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002896 }
2897 }
2898
Jens Axboe5e559562019-11-13 16:12:46 -07002899 return len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002900}
2901
Jens Axboebcda7ba2020-02-23 16:42:51 -07002902static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2903{
2904 if (needs_lock)
2905 mutex_unlock(&ctx->uring_lock);
2906}
2907
2908static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2909{
2910 /*
2911 * "Normal" inline submissions always hold the uring_lock, since we
2912 * grab it from the system call. Same is true for the SQPOLL offload.
2913 * The only exception is when we've detached the request and issue it
2914 * from an async worker thread, grab the lock for that case.
2915 */
2916 if (needs_lock)
2917 mutex_lock(&ctx->uring_lock);
2918}
2919
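/*
 * Pick a buffer from the provided-buffer group 'bgid' for this request. The
 * uring_lock is taken if needed, the chosen buffer is removed from the group,
 * and *len is clamped to the buffer's size. Returns ERR_PTR(-ENOBUFS) if the
 * group is empty or doesn't exist.
 */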
2920static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2921 int bgid, struct io_buffer *kbuf,
2922 bool needs_lock)
2923{
2924 struct io_buffer *head;
2925
2926 if (req->flags & REQ_F_BUFFER_SELECTED)
2927 return kbuf;
2928
2929 io_ring_submit_lock(req->ctx, needs_lock);
2930
2931 lockdep_assert_held(&req->ctx->uring_lock);
2932
2933 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2934 if (head) {
2935 if (!list_empty(&head->list)) {
2936 kbuf = list_last_entry(&head->list, struct io_buffer,
2937 list);
2938 list_del(&kbuf->list);
2939 } else {
2940 kbuf = head;
2941 idr_remove(&req->ctx->io_buffer_idr, bgid);
2942 }
2943 if (*len > kbuf->len)
2944 *len = kbuf->len;
2945 } else {
2946 kbuf = ERR_PTR(-ENOBUFS);
2947 }
2948
2949 io_ring_submit_unlock(req->ctx, needs_lock);
2950
2951 return kbuf;
2952}
2953
Jens Axboe4d954c22020-02-27 07:31:19 -07002954static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2955 bool needs_lock)
2956{
2957 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002958 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002959
2960 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002961 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002962 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2963 if (IS_ERR(kbuf))
2964 return kbuf;
2965 req->rw.addr = (u64) (unsigned long) kbuf;
2966 req->flags |= REQ_F_BUFFER_SELECTED;
2967 return u64_to_user_ptr(kbuf->addr);
2968}
2969
2970#ifdef CONFIG_COMPAT
2971static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2972 bool needs_lock)
2973{
2974 struct compat_iovec __user *uiov;
2975 compat_ssize_t clen;
2976 void __user *buf;
2977 ssize_t len;
2978
2979 uiov = u64_to_user_ptr(req->rw.addr);
2980 if (!access_ok(uiov, sizeof(*uiov)))
2981 return -EFAULT;
2982 if (__get_user(clen, &uiov->iov_len))
2983 return -EFAULT;
2984 if (clen < 0)
2985 return -EINVAL;
2986
2987 len = clen;
2988 buf = io_rw_buffer_select(req, &len, needs_lock);
2989 if (IS_ERR(buf))
2990 return PTR_ERR(buf);
2991 iov[0].iov_base = buf;
2992 iov[0].iov_len = (compat_size_t) len;
2993 return 0;
2994}
2995#endif
2996
2997static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2998 bool needs_lock)
2999{
3000 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3001 void __user *buf;
3002 ssize_t len;
3003
3004 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3005 return -EFAULT;
3006
3007 len = iov[0].iov_len;
3008 if (len < 0)
3009 return -EINVAL;
3010 buf = io_rw_buffer_select(req, &len, needs_lock);
3011 if (IS_ERR(buf))
3012 return PTR_ERR(buf);
3013 iov[0].iov_base = buf;
3014 iov[0].iov_len = len;
3015 return 0;
3016}
3017
3018static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3019 bool needs_lock)
3020{
Jens Axboedddb3e22020-06-04 11:27:01 -06003021 if (req->flags & REQ_F_BUFFER_SELECTED) {
3022 struct io_buffer *kbuf;
3023
3024 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3025 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3026 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003027 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003028 }
Jens Axboe4d954c22020-02-27 07:31:19 -07003029 if (!req->rw.len)
3030 return 0;
3031 else if (req->rw.len > 1)
3032 return -EINVAL;
3033
3034#ifdef CONFIG_COMPAT
3035 if (req->ctx->compat)
3036 return io_compat_import(req, iov, needs_lock);
3037#endif
3038
3039 return __io_iov_buffer_select(req, iov, needs_lock);
3040}
3041
Jens Axboe8452fd02020-08-18 13:58:33 -07003042static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
3043 struct iovec **iovec, struct iov_iter *iter,
3044 bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003045{
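	/*
	 * Resolve the request's buffer description into an iov_iter: a
	 * registered (fixed) buffer, a single user address for READ/WRITE,
	 * a provided-buffer group selection, or a full readv/writev style
	 * iovec copied from userspace.
	 */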
Jens Axboe9adbd452019-12-20 08:45:55 -07003046 void __user *buf = u64_to_user_ptr(req->rw.addr);
3047 size_t sqe_len = req->rw.len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003048 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003049 u8 opcode;
3050
Jens Axboed625c6e2019-12-17 19:53:05 -07003051 opcode = req->opcode;
Pavel Begunkov7d009162019-11-25 23:14:40 +03003052 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003053 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003054 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003055 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003056
Jens Axboebcda7ba2020-02-23 16:42:51 -07003057 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003058 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003059 return -EINVAL;
3060
Jens Axboe3a6820f2019-12-22 15:19:35 -07003061 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003062 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003063 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003064 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003065 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003066 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003067 }
3068
Jens Axboe3a6820f2019-12-22 15:19:35 -07003069 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3070 *iovec = NULL;
Jens Axboe3a901592020-02-25 17:48:55 -07003071 return ret < 0 ? ret : sqe_len;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003072 }
3073
Jens Axboe4d954c22020-02-27 07:31:19 -07003074 if (req->flags & REQ_F_BUFFER_SELECT) {
3075 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003076 if (!ret) {
3077 ret = (*iovec)->iov_len;
3078 iov_iter_init(iter, rw, *iovec, 1, ret);
3079 }
Jens Axboe4d954c22020-02-27 07:31:19 -07003080 *iovec = NULL;
3081 return ret;
3082 }
3083
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003084 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3085 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003086}
3087
Jens Axboe8452fd02020-08-18 13:58:33 -07003088static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
3089 struct iovec **iovec, struct iov_iter *iter,
3090 bool needs_lock)
3091{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003092 struct io_async_rw *iorw = req->async_data;
3093
3094 if (!iorw)
Jens Axboe8452fd02020-08-18 13:58:33 -07003095 return __io_import_iovec(rw, req, iovec, iter, needs_lock);
3096 *iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003097 return iov_iter_count(&iorw->iter);
Jens Axboe8452fd02020-08-18 13:58:33 -07003098}
3099
Jens Axboe0fef9482020-08-26 10:36:20 -06003100static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3101{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003102 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003103}
3104
Jens Axboe32960612019-09-23 11:05:34 -06003105/*
3106 * For files that don't have ->read_iter() and ->write_iter(), handle them
3107 * by looping over ->read() or ->write() manually.
3108 */
3109static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
3110 struct iov_iter *iter)
3111{
3112 ssize_t ret = 0;
3113
3114 /*
3115 * Don't support polled IO through this interface, and we can't
3116 * support non-blocking either. For the latter, this just causes
3117 * the kiocb to be handled from an async context.
3118 */
3119 if (kiocb->ki_flags & IOCB_HIPRI)
3120 return -EOPNOTSUPP;
3121 if (kiocb->ki_flags & IOCB_NOWAIT)
3122 return -EAGAIN;
3123
3124 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003125 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003126 ssize_t nr;
3127
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003128 if (!iov_iter_is_bvec(iter)) {
3129 iovec = iov_iter_iovec(iter);
3130 } else {
3131 /* fixed buffers import bvec */
3132 iovec.iov_base = kmap(iter->bvec->bv_page)
3133 + iter->iov_offset;
3134 iovec.iov_len = min(iter->count,
3135 iter->bvec->bv_len - iter->iov_offset);
3136 }
3137
Jens Axboe32960612019-09-23 11:05:34 -06003138 if (rw == READ) {
3139 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003140 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003141 } else {
3142 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003143 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003144 }
3145
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003146 if (iov_iter_is_bvec(iter))
3147 kunmap(iter->bvec->bv_page);
3148
Jens Axboe32960612019-09-23 11:05:34 -06003149 if (nr < 0) {
3150 if (!ret)
3151 ret = nr;
3152 break;
3153 }
3154 ret += nr;
3155 if (nr != iovec.iov_len)
3156 break;
3157 iov_iter_advance(iter, nr);
3158 }
3159
3160 return ret;
3161}
3162
Jens Axboeff6165b2020-08-13 09:47:43 -06003163static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3164 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003165{
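	/*
	 * Snapshot the iterator into the request's async data. If the iovec
	 * is the caller's on-stack fast_iov array, copy the used entries into
	 * rw->fast_iov so the state survives the submission context;
	 * otherwise keep the heap-allocated iovec and flag it for cleanup.
	 */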
Jens Axboee8c2bc12020-08-15 18:44:09 -07003166 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003167
Jens Axboeff6165b2020-08-13 09:47:43 -06003168 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003169 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003170 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003171 /* can only be fixed buffers, no need to do anything */
3172 if (iter->type == ITER_BVEC)
3173 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003174 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003175 unsigned iov_off = 0;
3176
3177 rw->iter.iov = rw->fast_iov;
3178 if (iter->iov != fast_iov) {
3179 iov_off = iter->iov - fast_iov;
3180 rw->iter.iov += iov_off;
3181 }
3182 if (rw->fast_iov != fast_iov)
3183 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003184 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003185 } else {
3186 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003187 }
3188}
3189
Jens Axboee8c2bc12020-08-15 18:44:09 -07003190static inline int __io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003191{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003192 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3193 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3194 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003195}
3196
Jens Axboee8c2bc12020-08-15 18:44:09 -07003197static int io_alloc_async_data(struct io_kiocb *req)
Jens Axboef67676d2019-12-02 11:03:47 -07003198{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003199 if (!io_op_defs[req->opcode].needs_async_data)
Jens Axboed3656342019-12-18 09:50:26 -07003200 return 0;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003201
Jens Axboee8c2bc12020-08-15 18:44:09 -07003202 return __io_alloc_async_data(req);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003203}
3204
Jens Axboeff6165b2020-08-13 09:47:43 -06003205static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3206 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003207 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003208{
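	/*
	 * Allocate per-request async state (when the opcode needs it, or when
	 * forced) and stash the current iovec/iterator so the request can be
	 * retried later from io-wq or task_work context.
	 */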
Jens Axboee8c2bc12020-08-15 18:44:09 -07003209 if (!force && !io_op_defs[req->opcode].needs_async_data)
Jens Axboe74566df2020-01-13 19:23:24 -07003210 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003211 if (!req->async_data) {
3212 if (__io_alloc_async_data(req))
Jens Axboe5d204bc2020-01-31 12:06:52 -07003213 return -ENOMEM;
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003214
Jens Axboeff6165b2020-08-13 09:47:43 -06003215 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003216 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003217 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003218}
3219
Pavel Begunkov73debe62020-09-30 22:57:54 +03003220static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003221{
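	/*
	 * Import the user iovec straight into the already-allocated async
	 * state at prep time, so the issue (and any later retry) path does
	 * not have to touch the original SQE data again.
	 */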
Jens Axboee8c2bc12020-08-15 18:44:09 -07003222 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003223 struct iovec *iov = iorw->fast_iov;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003224 ssize_t ret;
3225
Pavel Begunkov73debe62020-09-30 22:57:54 +03003226 ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003227 if (unlikely(ret < 0))
3228 return ret;
3229
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003230 iorw->bytes_done = 0;
3231 iorw->free_iovec = iov;
3232 if (iov)
3233 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003234 return 0;
3235}
3236
Pavel Begunkov73debe62020-09-30 22:57:54 +03003237static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003238{
3239 ssize_t ret;
3240
Pavel Begunkova88fc402020-09-30 22:57:53 +03003241 ret = io_prep_rw(req, sqe);
Jens Axboe3529d8c2019-12-19 18:24:38 -07003242 if (ret)
3243 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07003244
Jens Axboe3529d8c2019-12-19 18:24:38 -07003245 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3246 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07003247
Pavel Begunkov5f798be2020-02-08 13:28:02 +03003248 /* either don't need iovec imported or already have it */
Pavel Begunkov2d199892020-09-30 22:57:35 +03003249 if (!req->async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003250 return 0;
Pavel Begunkov73debe62020-09-30 22:57:54 +03003251 return io_rw_prep_async(req, READ);
Jens Axboef67676d2019-12-02 11:03:47 -07003252}
3253
Jens Axboec1dd91d2020-08-03 16:43:59 -06003254/*
3255 * This is our waitqueue callback handler, registered through lock_page_async()
 3256 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3257 * This gets called when the page is unlocked, and we generally expect that to
3258 * happen when the page IO is completed and the page is now uptodate. This will
3259 * queue a task_work based retry of the operation, attempting to copy the data
3260 * again. If the latter fails because the page was NOT uptodate, then we will
3261 * do a thread based blocking retry of the operation. That's the unexpected
3262 * slow path.
3263 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003264static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3265 int sync, void *arg)
3266{
3267 struct wait_page_queue *wpq;
3268 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003269 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003270 int ret;
3271
3272 wpq = container_of(wait, struct wait_page_queue, wait);
3273
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003274 if (!wake_page_match(wpq, key))
3275 return 0;
3276
Hao Xuc8d317a2020-09-29 20:00:45 +08003277 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003278 list_del_init(&wait->entry);
3279
Pavel Begunkove7375122020-07-12 20:42:04 +03003280 init_task_work(&req->task_work, io_req_task_submit);
Jens Axboe6d816e02020-08-11 08:04:14 -06003281 percpu_ref_get(&req->ctx->refs);
3282
Jens Axboebcf5a062020-05-22 09:24:42 -06003283 /* submit ref gets dropped, acquire a new one */
3284 refcount_inc(&req->refs);
Jens Axboe87c43112020-09-30 21:00:14 -06003285 ret = io_req_task_work_add(req, true);
Jens Axboebcf5a062020-05-22 09:24:42 -06003286 if (unlikely(ret)) {
Jens Axboec2c4c832020-07-01 15:37:11 -06003287 struct task_struct *tsk;
3288
Jens Axboebcf5a062020-05-22 09:24:42 -06003289 /* queue just for cancelation */
Pavel Begunkove7375122020-07-12 20:42:04 +03003290 init_task_work(&req->task_work, io_req_task_cancel);
Jens Axboebcf5a062020-05-22 09:24:42 -06003291 tsk = io_wq_get_task(req->ctx->io_wq);
Pavel Begunkove7375122020-07-12 20:42:04 +03003292 task_work_add(tsk, &req->task_work, 0);
Jens Axboec2c4c832020-07-01 15:37:11 -06003293 wake_up_process(tsk);
Jens Axboebcf5a062020-05-22 09:24:42 -06003294 }
Jens Axboebcf5a062020-05-22 09:24:42 -06003295 return 1;
3296}
3297
Jens Axboec1dd91d2020-08-03 16:43:59 -06003298/*
3299 * This controls whether a given IO request should be armed for async page
3300 * based retry. If we return false here, the request is handed to the async
3301 * worker threads for retry. If we're doing buffered reads on a regular file,
3302 * we prepare a private wait_page_queue entry and retry the operation. This
3303 * will either succeed because the page is now uptodate and unlocked, or it
3304 * will register a callback when the page is unlocked at IO completion. Through
3305 * that callback, io_uring uses task_work to setup a retry of the operation.
3306 * That retry will attempt the buffered read again. The retry will generally
3307 * succeed, or in rare cases where it fails, we then fall back to using the
3308 * async worker threads for a blocking retry.
3309 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003310static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003311{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003312 struct io_async_rw *rw = req->async_data;
3313 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003314 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003315
3316 /* never retry for NOWAIT, we just complete with -EAGAIN */
3317 if (req->flags & REQ_F_NOWAIT)
3318 return false;
3319
Jens Axboe227c0c92020-08-13 11:51:40 -06003320 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003321 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003322 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003323
Jens Axboebcf5a062020-05-22 09:24:42 -06003324 /*
3325 * just use poll if we can, and don't attempt if the fs doesn't
3326 * support callback based unlocks
3327 */
3328 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3329 return false;
3330
Jens Axboe3b2a4432020-08-16 10:58:43 -07003331 wait->wait.func = io_async_buf_func;
3332 wait->wait.private = req;
3333 wait->wait.flags = 0;
3334 INIT_LIST_HEAD(&wait->wait.entry);
3335 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003336 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003337 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003338 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003339}
3340
3341static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3342{
3343 if (req->file->f_op->read_iter)
3344 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003345 else if (req->file->f_op->read)
3346 return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
3347 else
3348 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003349}
3350
Jens Axboea1d7c392020-06-22 11:09:46 -06003351static int io_read(struct io_kiocb *req, bool force_nonblock,
3352 struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003353{
3354 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003355 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003356 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003357 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003358 ssize_t io_size, ret, ret2;
Jens Axboe31b51512019-01-18 22:56:34 -07003359 size_t iov_count;
Jens Axboef5cac8b2020-09-14 09:30:38 -06003360 bool no_async;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003361
Jens Axboee8c2bc12020-08-15 18:44:09 -07003362 if (rw)
3363 iter = &rw->iter;
Jens Axboeff6165b2020-08-13 09:47:43 -06003364
3365 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
Jens Axboe06b76d42019-12-19 14:44:26 -07003366 if (ret < 0)
3367 return ret;
Jens Axboeeefdf302020-08-27 16:40:19 -06003368 iov_count = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003369 io_size = ret;
3370 req->result = io_size;
Jens Axboe227c0c92020-08-13 11:51:40 -06003371 ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003372
Jens Axboefd6c2e42019-12-18 12:19:41 -07003373 /* Ensure we clear previously set non-block flag */
3374 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003375 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003376 else
3377 kiocb->ki_flags |= IOCB_NOWAIT;
3378
Jens Axboefd6c2e42019-12-18 12:19:41 -07003379
Pavel Begunkov24c74672020-06-21 13:09:51 +03003380 /* If the file doesn't support async, just async punt */
Jens Axboef5cac8b2020-09-14 09:30:38 -06003381 no_async = force_nonblock && !io_file_supports_async(req->file, READ);
3382 if (no_async)
Jens Axboef67676d2019-12-02 11:03:47 -07003383 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003384
Jens Axboe0fef9482020-08-26 10:36:20 -06003385 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003386 if (unlikely(ret))
3387 goto out_free;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003388
Jens Axboe227c0c92020-08-13 11:51:40 -06003389 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003390
Jens Axboe227c0c92020-08-13 11:51:40 -06003391 if (!ret) {
3392 goto done;
3393 } else if (ret == -EIOCBQUEUED) {
3394 ret = 0;
3395 goto out_free;
3396 } else if (ret == -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003397 /* IOPOLL retry should happen for io-wq threads */
3398 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003399 goto done;
Jens Axboe355afae2020-09-02 09:30:31 -06003400 /* no retry on NONBLOCK marked file */
3401 if (req->file->f_flags & O_NONBLOCK)
3402 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003403 /* some cases will consume bytes even on error returns */
3404 iov_iter_revert(iter, iov_count - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003405 ret = 0;
3406 goto copy_iov;
Jens Axboe227c0c92020-08-13 11:51:40 -06003407 } else if (ret < 0) {
Jens Axboe00d23d52020-08-25 12:59:22 -06003408 /* make sure -ERESTARTSYS -> -EINTR is done */
3409 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003410 }
3411
 3412	/* read it all, or we did a blocking attempt. No retry. */
Jens Axboef91daf52020-08-15 15:58:42 -07003413 if (!iov_iter_count(iter) || !force_nonblock ||
3414 (req->file->f_flags & O_NONBLOCK))
Jens Axboe227c0c92020-08-13 11:51:40 -06003415 goto done;
3416
3417 io_size -= ret;
3418copy_iov:
3419 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3420 if (ret2) {
3421 ret = ret2;
3422 goto out_free;
3423 }
Jens Axboef5cac8b2020-09-14 09:30:38 -06003424 if (no_async)
3425 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003426 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003427	/* it's copied and will be cleaned up with ->async_data */
3428 iovec = NULL;
3429 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003430 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003431retry:
Jens Axboee8c2bc12020-08-15 18:44:09 -07003432 rw->bytes_done += ret;
Jens Axboe227c0c92020-08-13 11:51:40 -06003433 /* if we can retry, do so with the callbacks armed */
3434 if (!io_rw_should_retry(req)) {
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003435 kiocb->ki_flags &= ~IOCB_WAITQ;
3436 return -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003437 }
Jens Axboe227c0c92020-08-13 11:51:40 -06003438
3439 /*
3440 * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
3441 * get -EIOCBQUEUED, then we'll get a notification when the desired
3442 * page gets unlocked. We can also get a partial read here, and if we
3443 * do, then just retry at the new offset.
3444 */
3445 ret = io_iter_do_read(req, iter);
3446 if (ret == -EIOCBQUEUED) {
3447 ret = 0;
3448 goto out_free;
3449 } else if (ret > 0 && ret < io_size) {
3450 /* we got some bytes, but not all. retry. */
3451 goto retry;
3452 }
3453done:
3454 kiocb_done(kiocb, ret, cs);
3455 ret = 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003456out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003457 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003458 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003459 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003460 return ret;
3461}
3462
Pavel Begunkov73debe62020-09-30 22:57:54 +03003463static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003464{
3465 ssize_t ret;
3466
Pavel Begunkova88fc402020-09-30 22:57:53 +03003467 ret = io_prep_rw(req, sqe);
Jens Axboe3529d8c2019-12-19 18:24:38 -07003468 if (ret)
3469 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07003470
Jens Axboe3529d8c2019-12-19 18:24:38 -07003471 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3472 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07003473
Pavel Begunkov5f798be2020-02-08 13:28:02 +03003474 /* either don't need iovec imported or already have it */
Pavel Begunkov2d199892020-09-30 22:57:35 +03003475 if (!req->async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003476 return 0;
Pavel Begunkov73debe62020-09-30 22:57:54 +03003477 return io_rw_prep_async(req, WRITE);
Jens Axboef67676d2019-12-02 11:03:47 -07003478}
3479
Jens Axboea1d7c392020-06-22 11:09:46 -06003480static int io_write(struct io_kiocb *req, bool force_nonblock,
3481 struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003482{
3483 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003484 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003485 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003486 struct io_async_rw *rw = req->async_data;
Jens Axboe31b51512019-01-18 22:56:34 -07003487 size_t iov_count;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003488 ssize_t ret, ret2, io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003489
Jens Axboee8c2bc12020-08-15 18:44:09 -07003490 if (rw)
3491 iter = &rw->iter;
Jens Axboeff6165b2020-08-13 09:47:43 -06003492
3493 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
Jens Axboe06b76d42019-12-19 14:44:26 -07003494 if (ret < 0)
3495 return ret;
Jens Axboeeefdf302020-08-27 16:40:19 -06003496 iov_count = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003497 io_size = ret;
3498 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003499
Jens Axboefd6c2e42019-12-18 12:19:41 -07003500 /* Ensure we clear previously set non-block flag */
3501 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003502 kiocb->ki_flags &= ~IOCB_NOWAIT;
3503 else
3504 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003505
Pavel Begunkov24c74672020-06-21 13:09:51 +03003506 /* If the file doesn't support async, just async punt */
Jens Axboeaf197f52020-04-28 13:15:06 -06003507 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003508 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003509
Jens Axboe10d59342019-12-09 20:16:22 -07003510 /* file path doesn't support NOWAIT for non-direct_IO */
3511 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3512 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003513 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003514
Jens Axboe0fef9482020-08-26 10:36:20 -06003515 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003516 if (unlikely(ret))
3517 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003518
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003519 /*
3520 * Open-code file_start_write here to grab freeze protection,
3521 * which will be released by another thread in
3522 * io_complete_rw(). Fool lockdep by telling it the lock got
3523 * released so that it doesn't complain about the held lock when
3524 * we return to userspace.
3525 */
3526 if (req->flags & REQ_F_ISREG) {
3527 __sb_start_write(file_inode(req->file)->i_sb,
3528 SB_FREEZE_WRITE, true);
3529 __sb_writers_release(file_inode(req->file)->i_sb,
3530 SB_FREEZE_WRITE);
3531 }
3532 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003533
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003534 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003535 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003536 else if (req->file->f_op->write)
Jens Axboeff6165b2020-08-13 09:47:43 -06003537 ret2 = loop_rw_iter(WRITE, req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003538 else
3539 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003540
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003541 /*
3542 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3543 * retry them without IOCB_NOWAIT.
3544 */
3545 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3546 ret2 = -EAGAIN;
Jens Axboe355afae2020-09-02 09:30:31 -06003547 /* no retry on NONBLOCK marked file */
3548 if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
3549 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003550 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003551 /* IOPOLL retry should happen for io-wq threads */
3552 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3553 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003554done:
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003555 kiocb_done(kiocb, ret2, cs);
3556 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003557copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003558 /* some cases will consume bytes even on error returns */
3559 iov_iter_revert(iter, iov_count - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003560 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Jens Axboeff6165b2020-08-13 09:47:43 -06003561 if (!ret)
3562 return -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003563 }
Jens Axboe31b51512019-01-18 22:56:34 -07003564out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003565 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003566 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003567 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003568 return ret;
3569}
3570
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003571static int __io_splice_prep(struct io_kiocb *req,
3572 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003573{
 3574	struct io_splice *sp = &req->splice;
3575 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003576
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003577 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3578 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003579
3580 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003581 sp->len = READ_ONCE(sqe->len);
3582 sp->flags = READ_ONCE(sqe->splice_flags);
3583
3584 if (unlikely(sp->flags & ~valid_flags))
3585 return -EINVAL;
3586
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003587 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3588 (sp->flags & SPLICE_F_FD_IN_FIXED));
3589 if (!sp->file_in)
3590 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003591 req->flags |= REQ_F_NEED_CLEANUP;
3592
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003593 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3594 /*
 3595		 * Splice operation will be punted async, and here we need to
 3596		 * modify io_wq_work.flags, so initialize io_wq_work first.
3597 */
3598 io_req_init_async(req);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003599 req->work.flags |= IO_WQ_WORK_UNBOUND;
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003600 }
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003601
3602 return 0;
3603}
3604
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003605static int io_tee_prep(struct io_kiocb *req,
3606 const struct io_uring_sqe *sqe)
3607{
3608 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3609 return -EINVAL;
3610 return __io_splice_prep(req, sqe);
3611}
3612
3613static int io_tee(struct io_kiocb *req, bool force_nonblock)
3614{
3615 struct io_splice *sp = &req->splice;
3616 struct file *in = sp->file_in;
3617 struct file *out = sp->file_out;
3618 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3619 long ret = 0;
3620
3621 if (force_nonblock)
3622 return -EAGAIN;
3623 if (sp->len)
3624 ret = do_tee(in, out, sp->len, flags);
3625
3626 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3627 req->flags &= ~REQ_F_NEED_CLEANUP;
3628
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003629 if (ret != sp->len)
3630 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003631 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003632 return 0;
3633}
3634
3635static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3636{
 3637	struct io_splice *sp = &req->splice;
3638
3639 sp->off_in = READ_ONCE(sqe->splice_off_in);
3640 sp->off_out = READ_ONCE(sqe->off);
3641 return __io_splice_prep(req, sqe);
3642}
3643
Pavel Begunkov014db002020-03-03 21:33:12 +03003644static int io_splice(struct io_kiocb *req, bool force_nonblock)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003645{
3646 struct io_splice *sp = &req->splice;
3647 struct file *in = sp->file_in;
3648 struct file *out = sp->file_out;
3649 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3650 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003651 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003652
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003653 if (force_nonblock)
3654 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003655
3656 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3657 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003658
Jens Axboe948a7742020-05-17 14:21:38 -06003659 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003660 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003661
3662 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3663 req->flags &= ~REQ_F_NEED_CLEANUP;
3664
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003665 if (ret != sp->len)
3666 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003667 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003668 return 0;
3669}
3670
Jens Axboe2b188cc2019-01-07 10:46:33 -07003671/*
3672 * IORING_OP_NOP just posts a completion event, nothing else.
3673 */
Jens Axboe229a7b62020-06-22 10:13:11 -06003674static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003675{
3676 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003677
Jens Axboedef596e2019-01-09 08:59:42 -07003678 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3679 return -EINVAL;
3680
Jens Axboe229a7b62020-06-22 10:13:11 -06003681 __io_req_complete(req, 0, 0, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003682 return 0;
3683}
3684
Jens Axboe3529d8c2019-12-19 18:24:38 -07003685static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003686{
Jens Axboe6b063142019-01-10 22:13:58 -07003687 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003688
Jens Axboe09bb8392019-03-13 12:39:28 -06003689 if (!req->file)
3690 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003691
Jens Axboe6b063142019-01-10 22:13:58 -07003692 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003693 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003694 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003695 return -EINVAL;
3696
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003697 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3698 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3699 return -EINVAL;
3700
3701 req->sync.off = READ_ONCE(sqe->off);
3702 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003703 return 0;
3704}
3705
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003706static int io_fsync(struct io_kiocb *req, bool force_nonblock)
Jens Axboe78912932020-01-14 22:09:06 -07003707{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003708 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003709 int ret;
3710
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003711 /* fsync always requires a blocking context */
3712 if (force_nonblock)
3713 return -EAGAIN;
3714
Jens Axboe9adbd452019-12-20 08:45:55 -07003715 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003716 end > 0 ? end : LLONG_MAX,
3717 req->sync.flags & IORING_FSYNC_DATASYNC);
3718 if (ret < 0)
3719 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003720 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003721 return 0;
3722}
3723
Jens Axboed63d1b52019-12-10 10:38:56 -07003724static int io_fallocate_prep(struct io_kiocb *req,
3725 const struct io_uring_sqe *sqe)
3726{
3727 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3728 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003729 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3730 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003731
3732 req->sync.off = READ_ONCE(sqe->off);
3733 req->sync.len = READ_ONCE(sqe->addr);
3734 req->sync.mode = READ_ONCE(sqe->len);
3735 return 0;
3736}
3737
Pavel Begunkov014db002020-03-03 21:33:12 +03003738static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
Jens Axboed63d1b52019-12-10 10:38:56 -07003739{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003740 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003741
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003742	/* fallocate always requires a blocking context */
3743 if (force_nonblock)
3744 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003745 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3746 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003747 if (ret < 0)
3748 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003749 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003750 return 0;
3751}
3752
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003753static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003754{
Jens Axboef8748882020-01-08 17:47:02 -07003755 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003756 int ret;
3757
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003758 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003759 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003760 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003761 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003762
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003763	/* open.how should already be initialised */
3764 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003765 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003766
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003767 req->open.dfd = READ_ONCE(sqe->fd);
3768 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003769 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003770 if (IS_ERR(req->open.filename)) {
3771 ret = PTR_ERR(req->open.filename);
3772 req->open.filename = NULL;
3773 return ret;
3774 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003775 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003776 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003777 return 0;
3778}
3779
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003780static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3781{
3782 u64 flags, mode;
3783
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003784 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3785 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003786 mode = READ_ONCE(sqe->len);
3787 flags = READ_ONCE(sqe->open_flags);
3788 req->open.how = build_open_how(flags, mode);
3789 return __io_openat_prep(req, sqe);
3790}
3791
Jens Axboecebdb982020-01-08 17:59:24 -07003792static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3793{
3794 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003795 size_t len;
3796 int ret;
3797
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003798 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3799 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003800 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3801 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003802 if (len < OPEN_HOW_SIZE_VER0)
3803 return -EINVAL;
3804
3805 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3806 len);
3807 if (ret)
3808 return ret;
3809
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003810 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003811}
3812
Pavel Begunkov014db002020-03-03 21:33:12 +03003813static int io_openat2(struct io_kiocb *req, bool force_nonblock)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003814{
3815 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003816 struct file *file;
3817 int ret;
3818
Jens Axboef86cd202020-01-29 13:46:44 -07003819 if (force_nonblock)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003820 return -EAGAIN;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003821
Jens Axboecebdb982020-01-08 17:59:24 -07003822 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003823 if (ret)
3824 goto err;
3825
Jens Axboe4022e7a2020-03-19 19:23:18 -06003826 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003827 if (ret < 0)
3828 goto err;
3829
3830 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3831 if (IS_ERR(file)) {
3832 put_unused_fd(ret);
3833 ret = PTR_ERR(file);
3834 } else {
3835 fsnotify_open(file);
3836 fd_install(ret, file);
3837 }
3838err:
3839 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003840 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003841 if (ret < 0)
3842 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003843 io_req_complete(req, ret);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003844 return 0;
3845}
3846
Pavel Begunkov014db002020-03-03 21:33:12 +03003847static int io_openat(struct io_kiocb *req, bool force_nonblock)
Jens Axboecebdb982020-01-08 17:59:24 -07003848{
Pavel Begunkov014db002020-03-03 21:33:12 +03003849 return io_openat2(req, force_nonblock);
Jens Axboecebdb982020-01-08 17:59:24 -07003850}
3851
Jens Axboe067524e2020-03-02 16:32:28 -07003852static int io_remove_buffers_prep(struct io_kiocb *req,
3853 const struct io_uring_sqe *sqe)
3854{
3855 struct io_provide_buf *p = &req->pbuf;
3856 u64 tmp;
3857
3858 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3859 return -EINVAL;
3860
3861 tmp = READ_ONCE(sqe->fd);
3862 if (!tmp || tmp > USHRT_MAX)
3863 return -EINVAL;
3864
3865 memset(p, 0, sizeof(*p));
3866 p->nbufs = tmp;
3867 p->bgid = READ_ONCE(sqe->buf_group);
3868 return 0;
3869}
3870
3871static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3872 int bgid, unsigned nbufs)
3873{
3874 unsigned i = 0;
3875
3876 /* shouldn't happen */
3877 if (!nbufs)
3878 return 0;
3879
3880 /* the head kbuf is the list itself */
3881 while (!list_empty(&buf->list)) {
3882 struct io_buffer *nxt;
3883
3884 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3885 list_del(&nxt->list);
3886 kfree(nxt);
3887 if (++i == nbufs)
3888 return i;
3889 }
3890 i++;
3891 kfree(buf);
3892 idr_remove(&ctx->io_buffer_idr, bgid);
3893
3894 return i;
3895}
3896
Jens Axboe229a7b62020-06-22 10:13:11 -06003897static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
3898 struct io_comp_state *cs)
Jens Axboe067524e2020-03-02 16:32:28 -07003899{
3900 struct io_provide_buf *p = &req->pbuf;
3901 struct io_ring_ctx *ctx = req->ctx;
3902 struct io_buffer *head;
3903 int ret = 0;
3904
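	/*
	 * Inline (force_nonblock) submission already holds the uring_lock,
	 * so only take it here when running from async context.
	 */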
3905 io_ring_submit_lock(ctx, !force_nonblock);
3906
3907 lockdep_assert_held(&ctx->uring_lock);
3908
3909 ret = -ENOENT;
3910 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3911 if (head)
3912 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3913
 3914	io_ring_submit_unlock(ctx, !force_nonblock);
3915 if (ret < 0)
3916 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06003917 __io_req_complete(req, ret, 0, cs);
Jens Axboe067524e2020-03-02 16:32:28 -07003918 return 0;
3919}
3920
Jens Axboeddf0322d2020-02-23 16:41:33 -07003921static int io_provide_buffers_prep(struct io_kiocb *req,
3922 const struct io_uring_sqe *sqe)
3923{
3924 struct io_provide_buf *p = &req->pbuf;
3925 u64 tmp;
3926
3927 if (sqe->ioprio || sqe->rw_flags)
3928 return -EINVAL;
3929
3930 tmp = READ_ONCE(sqe->fd);
3931 if (!tmp || tmp > USHRT_MAX)
3932 return -E2BIG;
3933 p->nbufs = tmp;
3934 p->addr = READ_ONCE(sqe->addr);
3935 p->len = READ_ONCE(sqe->len);
3936
Bijan Mottahedehefe68c12020-06-04 18:01:52 -07003937 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
Jens Axboeddf0322d2020-02-23 16:41:33 -07003938 return -EFAULT;
3939
3940 p->bgid = READ_ONCE(sqe->buf_group);
3941 tmp = READ_ONCE(sqe->off);
3942 if (tmp > USHRT_MAX)
3943 return -E2BIG;
3944 p->bid = tmp;
3945 return 0;
3946}
3947
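/*
 * Allocate and link up to pbuf->nbufs buffers described by pbuf, appending
 * them to the list headed at *head (allocating the head itself if needed).
 * Returns the number of buffers added, or -ENOMEM if none could be allocated.
 */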
3948static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3949{
3950 struct io_buffer *buf;
3951 u64 addr = pbuf->addr;
3952 int i, bid = pbuf->bid;
3953
3954 for (i = 0; i < pbuf->nbufs; i++) {
3955 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3956 if (!buf)
3957 break;
3958
3959 buf->addr = addr;
3960 buf->len = pbuf->len;
3961 buf->bid = bid;
3962 addr += pbuf->len;
3963 bid++;
3964 if (!*head) {
3965 INIT_LIST_HEAD(&buf->list);
3966 *head = buf;
3967 } else {
3968 list_add_tail(&buf->list, &(*head)->list);
3969 }
3970 }
3971
3972 return i ? i : -ENOMEM;
3973}
3974
Jens Axboe229a7b62020-06-22 10:13:11 -06003975static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
3976 struct io_comp_state *cs)
Jens Axboeddf0322d2020-02-23 16:41:33 -07003977{
3978 struct io_provide_buf *p = &req->pbuf;
3979 struct io_ring_ctx *ctx = req->ctx;
3980 struct io_buffer *head, *list;
3981 int ret = 0;
3982
3983 io_ring_submit_lock(ctx, !force_nonblock);
3984
3985 lockdep_assert_held(&ctx->uring_lock);
3986
3987 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3988
3989 ret = io_add_buffers(p, &head);
3990 if (ret < 0)
3991 goto out;
3992
3993 if (!list) {
3994 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3995 GFP_KERNEL);
3996 if (ret < 0) {
Jens Axboe067524e2020-03-02 16:32:28 -07003997 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07003998 goto out;
3999 }
4000 }
4001out:
4002 io_ring_submit_unlock(ctx, !force_nonblock);
4003 if (ret < 0)
4004 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004005 __io_req_complete(req, ret, 0, cs);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004006 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004007}
4008
Jens Axboe3e4827b2020-01-08 15:18:09 -07004009static int io_epoll_ctl_prep(struct io_kiocb *req,
4010 const struct io_uring_sqe *sqe)
4011{
4012#if defined(CONFIG_EPOLL)
4013 if (sqe->ioprio || sqe->buf_index)
4014 return -EINVAL;
Jens Axboe6ca56f82020-09-18 16:51:19 -06004015 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004016 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004017
4018 req->epoll.epfd = READ_ONCE(sqe->fd);
4019 req->epoll.op = READ_ONCE(sqe->len);
4020 req->epoll.fd = READ_ONCE(sqe->off);
4021
4022 if (ep_op_has_event(req->epoll.op)) {
4023 struct epoll_event __user *ev;
4024
4025 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4026 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4027 return -EFAULT;
4028 }
4029
4030 return 0;
4031#else
4032 return -EOPNOTSUPP;
4033#endif
4034}
4035
Jens Axboe229a7b62020-06-22 10:13:11 -06004036static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
4037 struct io_comp_state *cs)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004038{
4039#if defined(CONFIG_EPOLL)
4040 struct io_epoll *ie = &req->epoll;
4041 int ret;
4042
4043 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4044 if (force_nonblock && ret == -EAGAIN)
4045 return -EAGAIN;
4046
4047 if (ret < 0)
4048 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004049 __io_req_complete(req, ret, 0, cs);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004050 return 0;
4051#else
4052 return -EOPNOTSUPP;
4053#endif
4054}
4055
Jens Axboec1ca7572019-12-25 22:18:28 -07004056static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4057{
4058#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4059 if (sqe->ioprio || sqe->buf_index || sqe->off)
4060 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004061 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4062 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004063
4064 req->madvise.addr = READ_ONCE(sqe->addr);
4065 req->madvise.len = READ_ONCE(sqe->len);
4066 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4067 return 0;
4068#else
4069 return -EOPNOTSUPP;
4070#endif
4071}
4072
Pavel Begunkov014db002020-03-03 21:33:12 +03004073static int io_madvise(struct io_kiocb *req, bool force_nonblock)
Jens Axboec1ca7572019-12-25 22:18:28 -07004074{
4075#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4076 struct io_madvise *ma = &req->madvise;
4077 int ret;
4078
4079 if (force_nonblock)
4080 return -EAGAIN;
4081
4082 ret = do_madvise(ma->addr, ma->len, ma->advice);
4083 if (ret < 0)
4084 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004085 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004086 return 0;
4087#else
4088 return -EOPNOTSUPP;
4089#endif
4090}
4091
Jens Axboe4840e412019-12-25 22:03:45 -07004092static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4093{
4094 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4095 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004096 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4097 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004098
4099 req->fadvise.offset = READ_ONCE(sqe->off);
4100 req->fadvise.len = READ_ONCE(sqe->len);
4101 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4102 return 0;
4103}
4104
Pavel Begunkov014db002020-03-03 21:33:12 +03004105static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
Jens Axboe4840e412019-12-25 22:03:45 -07004106{
4107 struct io_fadvise *fa = &req->fadvise;
4108 int ret;
4109
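	/*
	 * Only advice values that merely tweak readahead state are safe to
	 * run without blocking; anything else (e.g. WILLNEED/DONTNEED) may
	 * need to do IO, so punt those to async context.
	 */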
Jens Axboe3e694262020-02-01 09:22:49 -07004110 if (force_nonblock) {
4111 switch (fa->advice) {
4112 case POSIX_FADV_NORMAL:
4113 case POSIX_FADV_RANDOM:
4114 case POSIX_FADV_SEQUENTIAL:
4115 break;
4116 default:
4117 return -EAGAIN;
4118 }
4119 }
Jens Axboe4840e412019-12-25 22:03:45 -07004120
4121 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4122 if (ret < 0)
4123 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004124 io_req_complete(req, ret);
Jens Axboe4840e412019-12-25 22:03:45 -07004125 return 0;
4126}
4127
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004128static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4129{
Jens Axboe6ca56f82020-09-18 16:51:19 -06004130 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004131 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004132 if (sqe->ioprio || sqe->buf_index)
4133 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004134 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004135 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004136
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004137 req->statx.dfd = READ_ONCE(sqe->fd);
4138 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004139 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004140 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4141 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004142
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004143 return 0;
4144}
4145
Pavel Begunkov014db002020-03-03 21:33:12 +03004146static int io_statx(struct io_kiocb *req, bool force_nonblock)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004147{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004148 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004149 int ret;
4150
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004151 if (force_nonblock) {
4152 /* only need file table for an actual valid fd */
4153 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4154 req->flags |= REQ_F_NO_FILE_TABLE;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004155 return -EAGAIN;
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004156 }
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004157
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004158 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4159 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004160
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004161 if (ret < 0)
4162 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004163 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004164 return 0;
4165}
4166
Jens Axboeb5dba592019-12-11 14:02:38 -07004167static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4168{
4169 /*
4170 * If we queue this for async, it must not be cancellable. That would
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08004171	 * leave the 'file' in an indeterminate state, and we need to modify
 4172	 * io_wq_work.flags here, so initialize io_wq_work first.
Jens Axboeb5dba592019-12-11 14:02:38 -07004173 */
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08004174 io_req_init_async(req);
Jens Axboeb5dba592019-12-11 14:02:38 -07004175 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
4176
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004177 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4178 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004179 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4180 sqe->rw_flags || sqe->buf_index)
4181 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004182 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004183 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004184
4185 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboe0f212202020-09-13 13:09:39 -06004186	if (req->file && req->file->f_op == &io_uring_fops)
Jens Axboefd2206e2020-06-02 16:40:47 -06004187 return -EBADF;
4188
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004189 req->close.put_file = NULL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004190 return 0;
4191}
4192
Jens Axboe229a7b62020-06-22 10:13:11 -06004193static int io_close(struct io_kiocb *req, bool force_nonblock,
4194 struct io_comp_state *cs)
Jens Axboeb5dba592019-12-11 14:02:38 -07004195{
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004196 struct io_close *close = &req->close;
Jens Axboeb5dba592019-12-11 14:02:38 -07004197 int ret;
4198
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004199	/* might already have been done during nonblock submission */
4200 if (!close->put_file) {
4201 ret = __close_fd_get_file(close->fd, &close->put_file);
4202 if (ret < 0)
4203 return (ret == -ENOENT) ? -EBADF : ret;
4204 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004205
4206 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004207 if (close->put_file->f_op->flush && force_nonblock) {
Pavel Begunkov24c74672020-06-21 13:09:51 +03004208 /* was never set, but play safe */
4209 req->flags &= ~REQ_F_NOWAIT;
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004210 /* avoid grabbing files - we don't need the files */
Pavel Begunkov24c74672020-06-21 13:09:51 +03004211 req->flags |= REQ_F_NO_FILE_TABLE;
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004212 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004213 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004214
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004215 /* No ->flush() or already async, safely close from here */
Jens Axboe98447d62020-10-14 10:48:51 -06004216 ret = filp_close(close->put_file, req->work.identity->files);
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004217 if (ret < 0)
4218 req_set_fail_links(req);
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004219 fput(close->put_file);
4220 close->put_file = NULL;
Jens Axboe229a7b62020-06-22 10:13:11 -06004221 __io_req_complete(req, ret, 0, cs);
Jens Axboe1a417f42020-01-31 17:16:48 -07004222 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004223}
4224
Jens Axboe3529d8c2019-12-19 18:24:38 -07004225static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004226{
4227 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004228
4229 if (!req->file)
4230 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004231
4232 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4233 return -EINVAL;
4234 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4235 return -EINVAL;
4236
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004237 req->sync.off = READ_ONCE(sqe->off);
4238 req->sync.len = READ_ONCE(sqe->len);
4239 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004240 return 0;
4241}
4242
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004243static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004244{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004245 int ret;
4246
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004247 /* sync_file_range always requires a blocking context */
4248 if (force_nonblock)
4249 return -EAGAIN;
4250
Jens Axboe9adbd452019-12-20 08:45:55 -07004251 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004252 req->sync.flags);
4253 if (ret < 0)
4254 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004255 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004256 return 0;
4257}
4258
YueHaibing469956e2020-03-04 15:53:52 +08004259#if defined(CONFIG_NET)
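/*
 * Stash the (partially prepared) msghdr state in req->async_data so the
 * request can be safely retried from async context. Returns -EAGAIN to
 * trigger the punt, or -ENOMEM if the async data couldn't be allocated.
 */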
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004260static int io_setup_async_msg(struct io_kiocb *req,
4261 struct io_async_msghdr *kmsg)
4262{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004263 struct io_async_msghdr *async_msg = req->async_data;
4264
4265 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004266 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004267 if (io_alloc_async_data(req)) {
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004268 if (kmsg->iov != kmsg->fast_iov)
4269 kfree(kmsg->iov);
4270 return -ENOMEM;
4271 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004272 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004273 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004274 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004275 return -EAGAIN;
4276}
4277
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004278static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4279 struct io_async_msghdr *iomsg)
4280{
4281 iomsg->iov = iomsg->fast_iov;
4282 iomsg->msg.msg_name = &iomsg->addr;
4283 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4284 req->sr_msg.msg_flags, &iomsg->iov);
4285}
4286
Jens Axboe3529d8c2019-12-19 18:24:38 -07004287static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004288{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004289 struct io_async_msghdr *async_msg = req->async_data;
Jens Axboee47293f2019-12-20 08:58:21 -07004290 struct io_sr_msg *sr = &req->sr_msg;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004291 int ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004292
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004293 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4294 return -EINVAL;
4295
Jens Axboee47293f2019-12-20 08:58:21 -07004296 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004297 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004298 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004299
Jens Axboed8768362020-02-27 14:17:49 -07004300#ifdef CONFIG_COMPAT
4301 if (req->ctx->compat)
4302 sr->msg_flags |= MSG_CMSG_COMPAT;
4303#endif
4304
Jens Axboee8c2bc12020-08-15 18:44:09 -07004305 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07004306 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004307 ret = io_sendmsg_copy_hdr(req, async_msg);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004308 if (!ret)
4309 req->flags |= REQ_F_NEED_CLEANUP;
4310 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004311}
4312
Jens Axboe229a7b62020-06-22 10:13:11 -06004313static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4314 struct io_comp_state *cs)
Jens Axboe03b12302019-12-02 18:50:25 -07004315{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004316 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004317 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004318 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004319 int ret;
4320
Jens Axboe03b12302019-12-02 18:50:25 -07004321 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004322 if (unlikely(!sock))
4323 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004324
Jens Axboee8c2bc12020-08-15 18:44:09 -07004325 if (req->async_data) {
4326 kmsg = req->async_data;
4327 kmsg->msg.msg_name = &kmsg->addr;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004328 /* if iov is set, it's allocated already */
4329 if (!kmsg->iov)
4330 kmsg->iov = kmsg->fast_iov;
4331 kmsg->msg.msg_iter.iov = kmsg->iov;
4332 } else {
4333 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004334 if (ret)
4335 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004336 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004337 }
4338
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004339 flags = req->sr_msg.msg_flags;
4340 if (flags & MSG_DONTWAIT)
4341 req->flags |= REQ_F_NOWAIT;
4342 else if (force_nonblock)
4343 flags |= MSG_DONTWAIT;
4344
4345 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4346 if (force_nonblock && ret == -EAGAIN)
4347 return io_setup_async_msg(req, kmsg);
4348 if (ret == -ERESTARTSYS)
4349 ret = -EINTR;
4350
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004351 if (kmsg->iov != kmsg->fast_iov)
Jens Axboe03b12302019-12-02 18:50:25 -07004352 kfree(kmsg->iov);
4353 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboefddafac2020-01-04 20:19:44 -07004354 if (ret < 0)
4355 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004356 __io_req_complete(req, ret, 0, cs);
Jens Axboefddafac2020-01-04 20:19:44 -07004357 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004358}
4359
Jens Axboe229a7b62020-06-22 10:13:11 -06004360static int io_send(struct io_kiocb *req, bool force_nonblock,
4361 struct io_comp_state *cs)
Jens Axboe03b12302019-12-02 18:50:25 -07004362{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004363 struct io_sr_msg *sr = &req->sr_msg;
4364 struct msghdr msg;
4365 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004366 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004367 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004368 int ret;
4369
4370 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004371 if (unlikely(!sock))
4372 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004373
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004374 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4375 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004376 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004377
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004378 msg.msg_name = NULL;
4379 msg.msg_control = NULL;
4380 msg.msg_controllen = 0;
4381 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004382
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004383 flags = req->sr_msg.msg_flags;
4384 if (flags & MSG_DONTWAIT)
4385 req->flags |= REQ_F_NOWAIT;
4386 else if (force_nonblock)
4387 flags |= MSG_DONTWAIT;
Jens Axboe03b12302019-12-02 18:50:25 -07004388
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004389 msg.msg_flags = flags;
4390 ret = sock_sendmsg(sock, &msg);
4391 if (force_nonblock && ret == -EAGAIN)
4392 return -EAGAIN;
4393 if (ret == -ERESTARTSYS)
4394 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004395
Jens Axboe03b12302019-12-02 18:50:25 -07004396 if (ret < 0)
4397 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004398 __io_req_complete(req, ret, 0, cs);
Jens Axboe03b12302019-12-02 18:50:25 -07004399 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004400}
4401
Pavel Begunkov1400e692020-07-12 20:41:05 +03004402static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4403 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004404{
4405 struct io_sr_msg *sr = &req->sr_msg;
4406 struct iovec __user *uiov;
4407 size_t iov_len;
4408 int ret;
4409
Pavel Begunkov1400e692020-07-12 20:41:05 +03004410 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4411 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004412 if (ret)
4413 return ret;
4414
4415 if (req->flags & REQ_F_BUFFER_SELECT) {
4416 if (iov_len > 1)
4417 return -EINVAL;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004418 if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004419 return -EFAULT;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004420 sr->len = iomsg->iov[0].iov_len;
4421 iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004422 sr->len);
Pavel Begunkov1400e692020-07-12 20:41:05 +03004423 iomsg->iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004424 } else {
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004425 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4426 &iomsg->iov, &iomsg->msg.msg_iter,
4427 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004428 if (ret > 0)
4429 ret = 0;
4430 }
4431
4432 return ret;
4433}
4434
4435#ifdef CONFIG_COMPAT
4436static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004437 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004438{
4439 struct compat_msghdr __user *msg_compat;
4440 struct io_sr_msg *sr = &req->sr_msg;
4441 struct compat_iovec __user *uiov;
4442 compat_uptr_t ptr;
4443 compat_size_t len;
4444 int ret;
4445
Pavel Begunkov270a5942020-07-12 20:41:04 +03004446 msg_compat = (struct compat_msghdr __user *) sr->umsg;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004447 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004448 &ptr, &len);
4449 if (ret)
4450 return ret;
4451
4452 uiov = compat_ptr(ptr);
4453 if (req->flags & REQ_F_BUFFER_SELECT) {
4454 compat_ssize_t clen;
4455
4456 if (len > 1)
4457 return -EINVAL;
4458 if (!access_ok(uiov, sizeof(*uiov)))
4459 return -EFAULT;
4460 if (__get_user(clen, &uiov->iov_len))
4461 return -EFAULT;
4462 if (clen < 0)
4463 return -EINVAL;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004464		sr->len = clen;
4465 iomsg->iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004466 } else {
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004467 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4468 UIO_FASTIOV, &iomsg->iov,
4469 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004470 if (ret < 0)
4471 return ret;
4472 }
4473
4474 return 0;
4475}
Jens Axboe03b12302019-12-02 18:50:25 -07004476#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004477
Pavel Begunkov1400e692020-07-12 20:41:05 +03004478static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4479 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004480{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004481 iomsg->msg.msg_name = &iomsg->addr;
4482 iomsg->iov = iomsg->fast_iov;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004483
4484#ifdef CONFIG_COMPAT
4485 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004486 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004487#endif
4488
Pavel Begunkov1400e692020-07-12 20:41:05 +03004489 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004490}
4491
Jens Axboebcda7ba2020-02-23 16:42:51 -07004492static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004493 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004494{
4495 struct io_sr_msg *sr = &req->sr_msg;
4496 struct io_buffer *kbuf;
4497
Jens Axboebcda7ba2020-02-23 16:42:51 -07004498 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4499 if (IS_ERR(kbuf))
4500 return kbuf;
4501
4502 sr->kbuf = kbuf;
4503 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004504 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004505}
4506
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004507static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4508{
4509 return io_put_kbuf(req, req->sr_msg.kbuf);
4510}
4511
Jens Axboe3529d8c2019-12-19 18:24:38 -07004512static int io_recvmsg_prep(struct io_kiocb *req,
4513 const struct io_uring_sqe *sqe)
Jens Axboe03b12302019-12-02 18:50:25 -07004514{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004515 struct io_async_msghdr *async_msg = req->async_data;
Jens Axboee47293f2019-12-20 08:58:21 -07004516 struct io_sr_msg *sr = &req->sr_msg;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004517 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004518
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004519 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4520 return -EINVAL;
4521
Jens Axboe3529d8c2019-12-19 18:24:38 -07004522 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004523 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004524 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004525 sr->bgid = READ_ONCE(sqe->buf_group);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004526
Jens Axboed8768362020-02-27 14:17:49 -07004527#ifdef CONFIG_COMPAT
4528 if (req->ctx->compat)
4529 sr->msg_flags |= MSG_CMSG_COMPAT;
4530#endif
4531
Jens Axboee8c2bc12020-08-15 18:44:09 -07004532 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
Jens Axboe06b76d42019-12-19 14:44:26 -07004533 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004534 ret = io_recvmsg_copy_hdr(req, async_msg);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004535 if (!ret)
4536 req->flags |= REQ_F_NEED_CLEANUP;
4537 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004538}
4539
Jens Axboe229a7b62020-06-22 10:13:11 -06004540static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4541 struct io_comp_state *cs)
Jens Axboe03b12302019-12-02 18:50:25 -07004542{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004543 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004544 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004545 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004546 unsigned flags;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004547 int ret, cflags = 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004548
Jens Axboe0fa03c62019-04-19 13:34:07 -06004549 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004550 if (unlikely(!sock))
4551 return ret;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004552
Jens Axboee8c2bc12020-08-15 18:44:09 -07004553 if (req->async_data) {
4554 kmsg = req->async_data;
4555 kmsg->msg.msg_name = &kmsg->addr;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004556 /* if iov is set, it's allocated already */
4557 if (!kmsg->iov)
4558 kmsg->iov = kmsg->fast_iov;
4559 kmsg->msg.msg_iter.iov = kmsg->iov;
4560 } else {
4561 ret = io_recvmsg_copy_hdr(req, &iomsg);
4562 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004563 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004564 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004565 }
4566
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004567 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004568 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004569 if (IS_ERR(kbuf))
4570 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004571 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4572 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
4573 1, req->sr_msg.len);
4574 }
4575
4576 flags = req->sr_msg.msg_flags;
4577 if (flags & MSG_DONTWAIT)
4578 req->flags |= REQ_F_NOWAIT;
4579 else if (force_nonblock)
4580 flags |= MSG_DONTWAIT;
4581
4582 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4583 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004584 if (force_nonblock && ret == -EAGAIN)
4585 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004586 if (ret == -ERESTARTSYS)
4587 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004588
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004589 if (req->flags & REQ_F_BUFFER_SELECTED)
4590 cflags = io_put_recv_kbuf(req);
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004591 if (kmsg->iov != kmsg->fast_iov)
Jens Axboe0b416c32019-12-15 10:57:46 -07004592 kfree(kmsg->iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004593 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004594 if (ret < 0)
4595 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004596 __io_req_complete(req, ret, cflags, cs);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004597 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004598}
4599
Jens Axboe229a7b62020-06-22 10:13:11 -06004600static int io_recv(struct io_kiocb *req, bool force_nonblock,
4601 struct io_comp_state *cs)
Jens Axboefddafac2020-01-04 20:19:44 -07004602{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004603 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004604 struct io_sr_msg *sr = &req->sr_msg;
4605 struct msghdr msg;
4606 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004607 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004608 struct iovec iov;
4609 unsigned flags;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004610 int ret, cflags = 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004611
Jens Axboefddafac2020-01-04 20:19:44 -07004612 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004613 if (unlikely(!sock))
4614 return ret;
Jens Axboefddafac2020-01-04 20:19:44 -07004615
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004616 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004617 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004618 if (IS_ERR(kbuf))
4619 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004620 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004621 }
4622
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004623 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004624 if (unlikely(ret))
4625 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004626
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004627 msg.msg_name = NULL;
4628 msg.msg_control = NULL;
4629 msg.msg_controllen = 0;
4630 msg.msg_namelen = 0;
4631 msg.msg_iocb = NULL;
4632 msg.msg_flags = 0;
4633
4634 flags = req->sr_msg.msg_flags;
4635 if (flags & MSG_DONTWAIT)
4636 req->flags |= REQ_F_NOWAIT;
4637 else if (force_nonblock)
4638 flags |= MSG_DONTWAIT;
4639
4640 ret = sock_recvmsg(sock, &msg, flags);
4641 if (force_nonblock && ret == -EAGAIN)
4642 return -EAGAIN;
4643 if (ret == -ERESTARTSYS)
4644 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004645out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004646 if (req->flags & REQ_F_BUFFER_SELECTED)
4647 cflags = io_put_recv_kbuf(req);
Jens Axboefddafac2020-01-04 20:19:44 -07004648 if (ret < 0)
4649 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004650 __io_req_complete(req, ret, cflags, cs);
Jens Axboefddafac2020-01-04 20:19:44 -07004651 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004652}
4653
Jens Axboe3529d8c2019-12-19 18:24:38 -07004654static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004655{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004656 struct io_accept *accept = &req->accept;
4657
Jens Axboe17f2fe32019-10-17 14:42:58 -06004658 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4659 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004660 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004661 return -EINVAL;
4662
Jens Axboed55e5f52019-12-11 16:12:15 -07004663 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4664 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004665 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004666 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004667 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004668}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004669
Jens Axboe229a7b62020-06-22 10:13:11 -06004670static int io_accept(struct io_kiocb *req, bool force_nonblock,
4671 struct io_comp_state *cs)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004672{
4673 struct io_accept *accept = &req->accept;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004674 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004675 int ret;
4676
Jiufei Xuee697dee2020-06-10 13:41:59 +08004677 if (req->file->f_flags & O_NONBLOCK)
4678 req->flags |= REQ_F_NOWAIT;
4679
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004680 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004681 accept->addr_len, accept->flags,
4682 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004683 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004684 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004685 if (ret < 0) {
4686 if (ret == -ERESTARTSYS)
4687 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004688 req_set_fail_links(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004689 }
Jens Axboe229a7b62020-06-22 10:13:11 -06004690 __io_req_complete(req, ret, 0, cs);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004691 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004692}
4693
Jens Axboe3529d8c2019-12-19 18:24:38 -07004694static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004695{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004696 struct io_connect *conn = &req->connect;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004697 struct io_async_connect *io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004698
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004699 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4700 return -EINVAL;
4701 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4702 return -EINVAL;
4703
Jens Axboe3529d8c2019-12-19 18:24:38 -07004704 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4705 conn->addr_len = READ_ONCE(sqe->addr2);
4706
4707 if (!io)
4708 return 0;
4709
4710 return move_addr_to_kernel(conn->addr, conn->addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004711 &io->address);
Jens Axboef499a022019-12-02 16:28:46 -07004712}
4713
Jens Axboe229a7b62020-06-22 10:13:11 -06004714static int io_connect(struct io_kiocb *req, bool force_nonblock,
4715 struct io_comp_state *cs)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004716{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004717 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004718 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004719 int ret;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004720
Jens Axboee8c2bc12020-08-15 18:44:09 -07004721 if (req->async_data) {
4722 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004723 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004724 ret = move_addr_to_kernel(req->connect.addr,
4725 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004726 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004727 if (ret)
4728 goto out;
4729 io = &__io;
4730 }
4731
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004732 file_flags = force_nonblock ? O_NONBLOCK : 0;
4733
Jens Axboee8c2bc12020-08-15 18:44:09 -07004734 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004735 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004736 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
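		/*
		 * Connect would block: save the copied-in address in
		 * req->async_data so the async retry doesn't have to touch
		 * user memory again.
		 */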
Jens Axboee8c2bc12020-08-15 18:44:09 -07004737 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004738 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004739 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004740 ret = -ENOMEM;
4741 goto out;
4742 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004743 io = req->async_data;
4744 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004745 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004746 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004747 if (ret == -ERESTARTSYS)
4748 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004749out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004750 if (ret < 0)
4751 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004752 __io_req_complete(req, ret, 0, cs);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004753 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004754}
YueHaibing469956e2020-03-04 15:53:52 +08004755#else /* !CONFIG_NET */
4756static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4757{
Jens Axboef8e85cf2019-11-23 14:24:24 -07004758 return -EOPNOTSUPP;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004759}
4760
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07004761static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4762 struct io_comp_state *cs)
Jens Axboe221c5eb2019-01-17 09:41:58 -07004763{
YueHaibing469956e2020-03-04 15:53:52 +08004764 return -EOPNOTSUPP;
4765}
4766
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07004767static int io_send(struct io_kiocb *req, bool force_nonblock,
4768 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08004769{
4770 return -EOPNOTSUPP;
4771}
4772
4773static int io_recvmsg_prep(struct io_kiocb *req,
4774 const struct io_uring_sqe *sqe)
4775{
4776 return -EOPNOTSUPP;
4777}
4778
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07004779static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4780 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08004781{
4782 return -EOPNOTSUPP;
4783}
4784
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07004785static int io_recv(struct io_kiocb *req, bool force_nonblock,
4786 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08004787{
4788 return -EOPNOTSUPP;
4789}
4790
4791static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4792{
4793 return -EOPNOTSUPP;
4794}
4795
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07004796static int io_accept(struct io_kiocb *req, bool force_nonblock,
4797 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08004798{
4799 return -EOPNOTSUPP;
4800}
4801
4802static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4803{
4804 return -EOPNOTSUPP;
4805}
4806
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07004807static int io_connect(struct io_kiocb *req, bool force_nonblock,
4808 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08004809{
4810 return -EOPNOTSUPP;
4811}
4812#endif /* CONFIG_NET */
Jens Axboe2b188cc2019-01-07 10:46:33 -07004813
Jens Axboed7718a92020-02-14 22:23:12 -07004814struct io_poll_table {
4815 struct poll_table_struct pt;
4816 struct io_kiocb *req;
4817 int error;
4818};
4819
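/*
 * Queue task_work so that @func runs in the context of the task that
 * originally submitted the request, completing (or retrying) the poll
 * there rather than directly from the waitqueue callback.
 */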
Jens Axboed7718a92020-02-14 22:23:12 -07004820static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4821 __poll_t mask, task_work_func_t func)
4822{
Jens Axboefd7d6de2020-08-23 11:00:37 -06004823 bool twa_signal_ok;
Jens Axboeaa96bf82020-04-03 11:26:26 -06004824 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004825
4826 /* for instances that support it check for an event match first: */
4827 if (mask && !(mask & poll->events))
4828 return 0;
4829
4830 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4831
4832 list_del_init(&poll->wait.entry);
4833
Jens Axboed7718a92020-02-14 22:23:12 -07004834 req->result = mask;
4835 init_task_work(&req->task_work, func);
Jens Axboe6d816e02020-08-11 08:04:14 -06004836 percpu_ref_get(&req->ctx->refs);
4837
Jens Axboed7718a92020-02-14 22:23:12 -07004838 /*
Jens Axboefd7d6de2020-08-23 11:00:37 -06004839	 * If we're using the signalfd wait_queue_head for this wakeup, then
4840 * it's not safe to use TWA_SIGNAL as we could be recursing on the
4841 * tsk->sighand->siglock on doing the wakeup. Should not be needed
4842 * either, as the normal wakeup will suffice.
4843 */
4844 twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
4845
4846 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004847 * If this fails, then the task is exiting. When a task exits, the
4848 * work gets canceled, so just cancel this request as well instead
4849 * of executing it. We can't safely execute it anyway, as we may not
 4850	 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07004851 */
Jens Axboe87c43112020-09-30 21:00:14 -06004852 ret = io_req_task_work_add(req, twa_signal_ok);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004853 if (unlikely(ret)) {
Jens Axboec2c4c832020-07-01 15:37:11 -06004854 struct task_struct *tsk;
4855
Jens Axboee3aabf92020-05-18 11:04:17 -06004856 WRITE_ONCE(poll->canceled, true);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004857 tsk = io_wq_get_task(req->ctx->io_wq);
Jens Axboece593a62020-06-30 12:39:05 -06004858 task_work_add(tsk, &req->task_work, 0);
4859 wake_up_process(tsk);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004860 }
Jens Axboed7718a92020-02-14 22:23:12 -07004861 return 1;
4862}
4863
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004864static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4865 __acquires(&req->ctx->completion_lock)
4866{
4867 struct io_ring_ctx *ctx = req->ctx;
4868
4869 if (!req->result && !READ_ONCE(poll->canceled)) {
4870 struct poll_table_struct pt = { ._key = poll->events };
4871
4872 req->result = vfs_poll(req->file, &pt) & poll->events;
4873 }
4874
4875 spin_lock_irq(&ctx->completion_lock);
4876 if (!req->result && !READ_ONCE(poll->canceled)) {
4877 add_wait_queue(poll->head, &poll->wait);
4878 return true;
4879 }
4880
4881 return false;
4882}
4883
Jens Axboed4e7cd32020-08-15 11:44:50 -07004884static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004885{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004886 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004887 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004888 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004889 return req->apoll->double_poll;
4890}
4891
4892static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4893{
4894 if (req->opcode == IORING_OP_POLL_ADD)
4895 return &req->poll;
4896 return &req->apoll->poll;
4897}
4898
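/*
 * Tear down the second poll entry, if one was armed: detach it from its
 * waitqueue and drop the extra request reference it held.
 */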
4899static void io_poll_remove_double(struct io_kiocb *req)
4900{
4901 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004902
4903 lockdep_assert_held(&req->ctx->completion_lock);
4904
4905 if (poll && poll->head) {
4906 struct wait_queue_head *head = poll->head;
4907
4908 spin_lock(&head->lock);
4909 list_del_init(&poll->wait.entry);
4910 if (poll->wait.private)
4911 refcount_dec(&req->refs);
4912 poll->head = NULL;
4913 spin_unlock(&head->lock);
4914 }
4915}
4916
4917static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4918{
4919 struct io_ring_ctx *ctx = req->ctx;
4920
Jens Axboed4e7cd32020-08-15 11:44:50 -07004921 io_poll_remove_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004922 req->poll.done = true;
4923 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4924 io_commit_cqring(ctx);
4925}
4926
4927static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4928{
4929 struct io_ring_ctx *ctx = req->ctx;
4930
4931 if (io_poll_rewait(req, &req->poll)) {
4932 spin_unlock_irq(&ctx->completion_lock);
4933 return;
4934 }
4935
4936 hash_del(&req->hash_node);
4937 io_poll_complete(req, req->result, 0);
Jens Axboe18bceab2020-05-15 11:56:54 -06004938 spin_unlock_irq(&ctx->completion_lock);
4939
Pavel Begunkov6a0af222020-10-13 09:43:58 +01004940 *nxt = io_put_req_find_next(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004941 io_cqring_ev_posted(ctx);
4942}
4943
4944static void io_poll_task_func(struct callback_head *cb)
4945{
4946 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06004947 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe18bceab2020-05-15 11:56:54 -06004948 struct io_kiocb *nxt = NULL;
4949
4950 io_poll_task_handler(req, &nxt);
Pavel Begunkovea1164e2020-06-30 15:20:41 +03004951 if (nxt)
4952 __io_req_task_submit(nxt);
Jens Axboe6d816e02020-08-11 08:04:14 -06004953 percpu_ref_put(&ctx->refs);
Jens Axboe18bceab2020-05-15 11:56:54 -06004954}
4955
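/*
 * Wakeup callback for the extra poll entry set up in __io_queue_proc().
 * On a matching event it detaches both entries and schedules completion
 * through io_poll_task_func() via the primary poll entry.
 */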
4956static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4957 int sync, void *key)
4958{
4959 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004960 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004961 __poll_t mask = key_to_poll(key);
4962
4963 /* for instances that support it check for an event match first: */
4964 if (mask && !(mask & poll->events))
4965 return 0;
4966
Jens Axboe8706e042020-09-28 08:38:54 -06004967 list_del_init(&wait->entry);
4968
Jens Axboe807abcb2020-07-17 17:09:27 -06004969 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004970 bool done;
4971
Jens Axboe807abcb2020-07-17 17:09:27 -06004972 spin_lock(&poll->head->lock);
4973 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06004974 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06004975 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07004976 /* make sure double remove sees this as being gone */
4977 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06004978 spin_unlock(&poll->head->lock);
Jens Axboe18bceab2020-05-15 11:56:54 -06004979 if (!done)
4980 __io_async_wake(req, poll, mask, io_poll_task_func);
4981 }
4982 refcount_dec(&req->refs);
4983 return 1;
4984}
4985
4986static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4987 wait_queue_func_t wake_func)
4988{
4989 poll->head = NULL;
4990 poll->done = false;
4991 poll->canceled = false;
4992 poll->events = events;
4993 INIT_LIST_HEAD(&poll->wait.entry);
4994 init_waitqueue_func_entry(&poll->wait, wake_func);
4995}
4996
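/*
 * poll_table queueing callback shared by the poll and async-poll paths:
 * hook the request into the file's waitqueue. If the file polls on more
 * than one waitqueue, a second io_poll_iocb is allocated and stashed
 * through @poll_ptr; a third waitqueue fails the request with -EINVAL.
 */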
4997static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06004998 struct wait_queue_head *head,
4999 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005000{
5001 struct io_kiocb *req = pt->req;
5002
5003 /*
5004 * If poll->head is already set, it's because the file being polled
5005	 * uses multiple waitqueues for poll handling (e.g., one for read, one
5006	 * for write). Set up a separate io_poll_iocb if this happens.
5007 */
5008 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005009 struct io_poll_iocb *poll_one = poll;
5010
Jens Axboe18bceab2020-05-15 11:56:54 -06005011 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005012 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005013 pt->error = -EINVAL;
5014 return;
5015 }
5016 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5017 if (!poll) {
5018 pt->error = -ENOMEM;
5019 return;
5020 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005021 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboe18bceab2020-05-15 11:56:54 -06005022 refcount_inc(&req->refs);
5023 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005024 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005025 }
5026
5027 pt->error = 0;
5028 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005029
5030 if (poll->events & EPOLLEXCLUSIVE)
5031 add_wait_queue_exclusive(head, &poll->wait);
5032 else
5033 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005034}
5035
5036static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5037 struct poll_table_struct *p)
5038{
5039 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005040 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005041
Jens Axboe807abcb2020-07-17 17:09:27 -06005042 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005043}
5044
Jens Axboed7718a92020-02-14 22:23:12 -07005045static void io_async_task_func(struct callback_head *cb)
5046{
5047 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5048 struct async_poll *apoll = req->apoll;
5049 struct io_ring_ctx *ctx = req->ctx;
5050
5051 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5052
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005053 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005054 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe6d816e02020-08-11 08:04:14 -06005055 percpu_ref_put(&ctx->refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005056 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005057 }
5058
Jens Axboe31067252020-05-17 17:43:31 -06005059 /* If req is still hashed, it cannot have been canceled. Don't check. */
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005060 if (hash_hashed(&req->hash_node))
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005061 hash_del(&req->hash_node);
Jens Axboe2bae0472020-04-13 11:16:34 -06005062
Jens Axboed4e7cd32020-08-15 11:44:50 -07005063 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005064 spin_unlock_irq(&ctx->completion_lock);
5065
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005066 if (!READ_ONCE(apoll->poll.canceled))
5067 __io_req_task_submit(req);
5068 else
5069 __io_req_task_cancel(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005070
Jens Axboe6d816e02020-08-11 08:04:14 -06005071 percpu_ref_put(&ctx->refs);
Jens Axboe807abcb2020-07-17 17:09:27 -06005072 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005073 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005074}
5075
5076static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5077 void *key)
5078{
5079 struct io_kiocb *req = wait->private;
5080 struct io_poll_iocb *poll = &req->apoll->poll;
5081
5082 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5083 key_to_poll(key));
5084
5085 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5086}
5087
5088static void io_poll_req_insert(struct io_kiocb *req)
5089{
5090 struct io_ring_ctx *ctx = req->ctx;
5091 struct hlist_head *list;
5092
5093 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5094 hlist_add_head(&req->hash_node, list);
5095}
5096
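/*
 * Common poll arming helper: issue vfs_poll() with the given mask and wake
 * function. If an event is already pending (or queueing failed), the entry
 * is taken back off the waitqueue and the mask is returned for inline
 * completion; otherwise the request is hashed for cancellation. Returns
 * with completion_lock held.
 */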
5097static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5098 struct io_poll_iocb *poll,
5099 struct io_poll_table *ipt, __poll_t mask,
5100 wait_queue_func_t wake_func)
5101 __acquires(&ctx->completion_lock)
5102{
5103 struct io_ring_ctx *ctx = req->ctx;
5104 bool cancel = false;
5105
Jens Axboe18bceab2020-05-15 11:56:54 -06005106 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005107 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005108 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005109
5110 ipt->pt._key = mask;
5111 ipt->req = req;
5112 ipt->error = -EINVAL;
5113
Jens Axboed7718a92020-02-14 22:23:12 -07005114 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5115
5116 spin_lock_irq(&ctx->completion_lock);
5117 if (likely(poll->head)) {
5118 spin_lock(&poll->head->lock);
5119 if (unlikely(list_empty(&poll->wait.entry))) {
5120 if (ipt->error)
5121 cancel = true;
5122 ipt->error = 0;
5123 mask = 0;
5124 }
5125 if (mask || ipt->error)
5126 list_del_init(&poll->wait.entry);
5127 else if (cancel)
5128 WRITE_ONCE(poll->canceled, true);
5129 else if (!poll->done) /* actually waiting for an event */
5130 io_poll_req_insert(req);
5131 spin_unlock(&poll->head->lock);
5132 }
5133
5134 return mask;
5135}
5136
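/*
 * Arm "fast poll" for a request that would otherwise be punted to io-wq:
 * rather than blocking a worker thread, wait for the file to signal
 * readiness and then retry the submission from task context. Returns true
 * if the poll handler was armed.
 */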
5137static bool io_arm_poll_handler(struct io_kiocb *req)
5138{
5139 const struct io_op_def *def = &io_op_defs[req->opcode];
5140 struct io_ring_ctx *ctx = req->ctx;
5141 struct async_poll *apoll;
5142 struct io_poll_table ipt;
5143 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005144 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005145
5146 if (!req->file || !file_can_poll(req->file))
5147 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005148 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005149 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005150 if (def->pollin)
5151 rw = READ;
5152 else if (def->pollout)
5153 rw = WRITE;
5154 else
5155 return false;
5156	/* if a non-blocking attempt isn't possible, there's no point arming a poll handler */
5157 if (!io_file_supports_async(req->file, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005158 return false;
5159
5160 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5161 if (unlikely(!apoll))
5162 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005163 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005164
5165 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005166 req->apoll = apoll;
5167 INIT_HLIST_NODE(&req->hash_node);
5168
Nathan Chancellor8755d972020-03-02 16:01:19 -07005169 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005170 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005171 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005172 if (def->pollout)
5173 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005174
5175 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5176 if ((req->opcode == IORING_OP_RECVMSG) &&
5177 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5178 mask &= ~POLLIN;
5179
Jens Axboed7718a92020-02-14 22:23:12 -07005180 mask |= POLLERR | POLLPRI;
5181
5182 ipt.pt._qproc = io_async_queue_proc;
5183
5184 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5185 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005186 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005187 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005188 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005189 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005190 kfree(apoll);
5191 return false;
5192 }
5193 spin_unlock_irq(&ctx->completion_lock);
5194 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5195 apoll->poll.events);
5196 return true;
5197}
5198
5199static bool __io_poll_remove_one(struct io_kiocb *req,
5200 struct io_poll_iocb *poll)
5201{
Jens Axboeb41e9852020-02-17 09:52:41 -07005202 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005203
5204 spin_lock(&poll->head->lock);
5205 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005206 if (!list_empty(&poll->wait.entry)) {
5207 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005208 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005209 }
5210 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005211 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005212 return do_complete;
5213}
5214
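/*
 * Cancel a single poll or async-poll request, posting -ECANCELED for it if
 * it was still pending. Called with completion_lock held.
 */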
5215static bool io_poll_remove_one(struct io_kiocb *req)
5216{
5217 bool do_complete;
5218
Jens Axboed4e7cd32020-08-15 11:44:50 -07005219 io_poll_remove_double(req);
5220
Jens Axboed7718a92020-02-14 22:23:12 -07005221 if (req->opcode == IORING_OP_POLL_ADD) {
5222 do_complete = __io_poll_remove_one(req, &req->poll);
5223 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005224 struct async_poll *apoll = req->apoll;
5225
Jens Axboed7718a92020-02-14 22:23:12 -07005226 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005227 do_complete = __io_poll_remove_one(req, &apoll->poll);
5228 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005229 io_put_req(req);
Jens Axboe807abcb2020-07-17 17:09:27 -06005230 kfree(apoll->double_poll);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005231 kfree(apoll);
5232 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005233 }
5234
Jens Axboeb41e9852020-02-17 09:52:41 -07005235 if (do_complete) {
5236 io_cqring_fill_event(req, -ECANCELED);
5237 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005238 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005239 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005240 }
5241
5242 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005243}
5244
Jens Axboe76e1b642020-09-26 15:05:03 -06005245/*
5246 * Returns true if we found and killed one or more poll requests
5247 */
5248static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005249{
Jens Axboe78076bb2019-12-04 19:56:40 -07005250 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005251 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005252 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005253
5254 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005255 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5256 struct hlist_head *list;
5257
5258 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005259 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5260 if (io_task_match(req, tsk))
5261 posted += io_poll_remove_one(req);
5262 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005263 }
5264 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005265
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005266 if (posted)
5267 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005268
5269 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005270}
5271
Jens Axboe47f46762019-11-09 17:43:02 -07005272static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5273{
Jens Axboe78076bb2019-12-04 19:56:40 -07005274 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005275 struct io_kiocb *req;
5276
Jens Axboe78076bb2019-12-04 19:56:40 -07005277 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5278 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005279 if (sqe_addr != req->user_data)
5280 continue;
5281 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07005282 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07005283 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005284 }
5285
5286 return -ENOENT;
5287}
5288
Jens Axboe3529d8c2019-12-19 18:24:38 -07005289static int io_poll_remove_prep(struct io_kiocb *req,
5290 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005291{
Jens Axboe221c5eb2019-01-17 09:41:58 -07005292 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5293 return -EINVAL;
5294 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5295 sqe->poll_events)
5296 return -EINVAL;
5297
Jens Axboe0969e782019-12-17 18:40:57 -07005298 req->poll.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07005299 return 0;
5300}
5301
5302/*
5303 * Find a running poll command that matches one specified in sqe->addr,
5304 * and remove it if found.
5305 */
5306static int io_poll_remove(struct io_kiocb *req)
5307{
5308 struct io_ring_ctx *ctx = req->ctx;
5309 u64 addr;
5310 int ret;
5311
Jens Axboe0969e782019-12-17 18:40:57 -07005312 addr = req->poll.addr;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005313 spin_lock_irq(&ctx->completion_lock);
Jens Axboe0969e782019-12-17 18:40:57 -07005314 ret = io_poll_cancel(ctx, addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005315 spin_unlock_irq(&ctx->completion_lock);
5316
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005317 if (ret < 0)
5318 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06005319 io_req_complete(req, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005320 return 0;
5321}
5322
Jens Axboe221c5eb2019-01-17 09:41:58 -07005323static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5324 void *key)
5325{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005326 struct io_kiocb *req = wait->private;
5327 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005328
Jens Axboed7718a92020-02-14 22:23:12 -07005329 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005330}
5331
Jens Axboe221c5eb2019-01-17 09:41:58 -07005332static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5333 struct poll_table_struct *p)
5334{
5335 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5336
Jens Axboee8c2bc12020-08-15 18:44:09 -07005337 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005338}
5339
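/*
 * Decode and validate an IORING_OP_POLL_ADD SQE. The poll mask arrives in
 * poll32_events (halfword-swapped on big-endian) and always includes
 * EPOLLERR and EPOLLHUP, matching regular poll(2) semantics.
 */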
Jens Axboe3529d8c2019-12-19 18:24:38 -07005340static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005341{
5342 struct io_poll_iocb *poll = &req->poll;
Jiufei Xue5769a352020-06-17 17:53:55 +08005343 u32 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005344
5345 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5346 return -EINVAL;
5347 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5348 return -EINVAL;
Jens Axboe09bb8392019-03-13 12:39:28 -06005349 if (!poll->file)
5350 return -EBADF;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005351
Jiufei Xue5769a352020-06-17 17:53:55 +08005352 events = READ_ONCE(sqe->poll32_events);
5353#ifdef __BIG_ENDIAN
5354 events = swahw32(events);
5355#endif
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005356 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5357 (events & EPOLLEXCLUSIVE);
Jens Axboe0969e782019-12-17 18:40:57 -07005358 return 0;
5359}
5360
Pavel Begunkov014db002020-03-03 21:33:12 +03005361static int io_poll_add(struct io_kiocb *req)
Jens Axboe0969e782019-12-17 18:40:57 -07005362{
5363 struct io_poll_iocb *poll = &req->poll;
5364 struct io_ring_ctx *ctx = req->ctx;
5365 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005366 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005367
Jens Axboe78076bb2019-12-04 19:56:40 -07005368 INIT_HLIST_NODE(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005369 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005370
Jens Axboed7718a92020-02-14 22:23:12 -07005371 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5372 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005373
Jens Axboe8c838782019-03-12 15:48:16 -06005374 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005375 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005376 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06005377 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005378 spin_unlock_irq(&ctx->completion_lock);
5379
Jens Axboe8c838782019-03-12 15:48:16 -06005380 if (mask) {
5381 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03005382 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005383 }
Jens Axboe8c838782019-03-12 15:48:16 -06005384 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005385}
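
/*
 * A minimal userspace sketch (not part of this file) of driving the
 * IORING_OP_POLL_ADD path above through liburing. It assumes liburing's
 * public helpers and an already-open pollable descriptor 'fd'; exact
 * helper signatures may differ between liburing versions.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data(sqe, (void *) 0x1234);	// shows up as user_data
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	// cqe->res carries the signaled poll mask, see io_poll_complete()
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */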
5386
Jens Axboe5262f562019-09-17 12:26:57 -06005387static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5388{
Jens Axboead8a48a2019-11-15 08:49:11 -07005389 struct io_timeout_data *data = container_of(timer,
5390 struct io_timeout_data, timer);
5391 struct io_kiocb *req = data->req;
5392 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005393 unsigned long flags;
5394
Jens Axboe5262f562019-09-17 12:26:57 -06005395 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005396 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005397 atomic_set(&req->ctx->cq_timeouts,
5398 atomic_read(&req->ctx->cq_timeouts) + 1);
5399
Jens Axboe78e19bb2019-11-06 15:21:34 -07005400 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06005401 io_commit_cqring(ctx);
5402 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5403
5404 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005405 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005406 io_put_req(req);
5407 return HRTIMER_NORESTART;
5408}
5409
Jens Axboef254ac02020-08-12 17:33:30 -06005410static int __io_timeout_cancel(struct io_kiocb *req)
Jens Axboe47f46762019-11-09 17:43:02 -07005411{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005412 struct io_timeout_data *io = req->async_data;
Jens Axboef254ac02020-08-12 17:33:30 -06005413 int ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005414
Jens Axboee8c2bc12020-08-15 18:44:09 -07005415 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe47f46762019-11-09 17:43:02 -07005416 if (ret == -1)
5417 return -EALREADY;
Pavel Begunkova71976f2020-10-10 18:34:11 +01005418 list_del_init(&req->timeout.list);
Jens Axboe47f46762019-11-09 17:43:02 -07005419
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005420 req_set_fail_links(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005421 io_cqring_fill_event(req, -ECANCELED);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005422 io_put_req_deferred(req, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07005423 return 0;
5424}
5425
Jens Axboef254ac02020-08-12 17:33:30 -06005426static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5427{
5428 struct io_kiocb *req;
5429 int ret = -ENOENT;
5430
5431 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5432 if (user_data == req->user_data) {
5433 ret = 0;
5434 break;
5435 }
5436 }
5437
5438 if (ret == -ENOENT)
5439 return ret;
5440
5441 return __io_timeout_cancel(req);
5442}
5443
Jens Axboe3529d8c2019-12-19 18:24:38 -07005444static int io_timeout_remove_prep(struct io_kiocb *req,
5445 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005446{
Jens Axboeb29472e2019-12-17 18:50:29 -07005447 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5448 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005449 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5450 return -EINVAL;
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +01005451 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags)
Jens Axboeb29472e2019-12-17 18:50:29 -07005452 return -EINVAL;
5453
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +01005454 req->timeout_rem.addr = READ_ONCE(sqe->addr);
Jens Axboeb29472e2019-12-17 18:50:29 -07005455 return 0;
5456}
5457
Jens Axboe11365042019-10-16 09:08:32 -06005458/*
5459 * Remove or update an existing timeout command
5460 */
Jens Axboefc4df992019-12-10 14:38:45 -07005461static int io_timeout_remove(struct io_kiocb *req)
Jens Axboe11365042019-10-16 09:08:32 -06005462{
5463 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005464 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005465
Jens Axboe11365042019-10-16 09:08:32 -06005466 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +01005467 ret = io_timeout_cancel(ctx, req->timeout_rem.addr);
Jens Axboe11365042019-10-16 09:08:32 -06005468
Jens Axboe47f46762019-11-09 17:43:02 -07005469 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06005470 io_commit_cqring(ctx);
5471 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005472 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005473 if (ret < 0)
5474 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005475 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005476 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005477}
5478
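/*
 * Prepare a timeout (or linked timeout) request: validate the SQE,
 * allocate the async timeout data, copy in the userspace timespec and
 * select absolute or relative hrtimer mode.
 */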
Jens Axboe3529d8c2019-12-19 18:24:38 -07005479static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005480 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005481{
Jens Axboead8a48a2019-11-15 08:49:11 -07005482 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005483 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005484 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005485
Jens Axboead8a48a2019-11-15 08:49:11 -07005486 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005487 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005488 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005489 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005490 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005491 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005492 flags = READ_ONCE(sqe->timeout_flags);
5493 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005494 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005495
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005496 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005497
Jens Axboee8c2bc12020-08-15 18:44:09 -07005498 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005499 return -ENOMEM;
5500
Jens Axboee8c2bc12020-08-15 18:44:09 -07005501 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005502 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005503
5504 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005505 return -EFAULT;
5506
Jens Axboe11365042019-10-16 09:08:32 -06005507 if (flags & IORING_TIMEOUT_ABS)
Jens Axboead8a48a2019-11-15 08:49:11 -07005508 data->mode = HRTIMER_MODE_ABS;
Jens Axboe11365042019-10-16 09:08:32 -06005509 else
Jens Axboead8a48a2019-11-15 08:49:11 -07005510 data->mode = HRTIMER_MODE_REL;
Jens Axboe11365042019-10-16 09:08:32 -06005511
Jens Axboead8a48a2019-11-15 08:49:11 -07005512 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5513 return 0;
5514}
5515
Jens Axboefc4df992019-12-10 14:38:45 -07005516static int io_timeout(struct io_kiocb *req)
Jens Axboead8a48a2019-11-15 08:49:11 -07005517{
Jens Axboead8a48a2019-11-15 08:49:11 -07005518 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005519 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005520 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005521 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005522
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005523 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005524
Jens Axboe5262f562019-09-17 12:26:57 -06005525 /*
5526	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005527 * timeout event to be satisfied. If it isn't set, then this is
5528	 * a pure timeout request and the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005529 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005530 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005531 entry = ctx->timeout_list.prev;
5532 goto add;
5533 }
Jens Axboe5262f562019-09-17 12:26:57 -06005534
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005535 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5536 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005537
5538 /*
5539 * Insertion sort, ensuring the first entry in the list is always
5540 * the one we need first.
5541 */
Jens Axboe5262f562019-09-17 12:26:57 -06005542 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005543 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5544 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005545
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005546 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005547 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005548 /* nxt.seq is behind @tail, otherwise would've been completed */
5549 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005550 break;
5551 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005552add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005553 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005554 data->timer.function = io_timeout_fn;
5555 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005556 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005557 return 0;
5558}
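
/*
 * A minimal userspace sketch (not part of this file) of the sqe->off
 * sequencing handled by io_timeout() above, expressed through liburing's
 * "count" argument. Assumes liburing and an already-initialized 'ring';
 * exact helper signatures may differ between liburing versions.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout(sqe, &ts, 4, 0);	// 4 completions or 1 second
 *	io_uring_submit(&ring);
 *	// The timeout CQE res is -ETIME if the timer fired first (see
 *	// io_timeout_fn()), and typically 0 if the completion count was
 *	// reached before that.
 */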
5559
Jens Axboe62755e32019-10-28 21:49:21 -06005560static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005561{
Jens Axboe62755e32019-10-28 21:49:21 -06005562 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboede0617e2019-04-06 21:51:27 -06005563
Jens Axboe62755e32019-10-28 21:49:21 -06005564 return req->user_data == (unsigned long) data;
5565}
5566
Jens Axboee977d6d2019-11-05 12:39:45 -07005567static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
Jens Axboe62755e32019-10-28 21:49:21 -06005568{
Jens Axboe62755e32019-10-28 21:49:21 -06005569 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005570 int ret = 0;
5571
Pavel Begunkov4f26bda2020-06-15 10:24:03 +03005572 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005573 switch (cancel_ret) {
5574 case IO_WQ_CANCEL_OK:
5575 ret = 0;
5576 break;
5577 case IO_WQ_CANCEL_RUNNING:
5578 ret = -EALREADY;
5579 break;
5580 case IO_WQ_CANCEL_NOTFOUND:
5581 ret = -ENOENT;
5582 break;
5583 }
5584
Jens Axboee977d6d2019-11-05 12:39:45 -07005585 return ret;
5586}
5587
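/*
 * Find and cancel a request by user_data: try the io-wq queues first, then
 * pending timeouts, then the poll hash. The outcome (or @success_ret in
 * place of 0) is posted as the cancel request's CQE.
 */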
Jens Axboe47f46762019-11-09 17:43:02 -07005588static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5589 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005590 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005591{
5592 unsigned long flags;
5593 int ret;
5594
5595 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5596 if (ret != -ENOENT) {
5597 spin_lock_irqsave(&ctx->completion_lock, flags);
5598 goto done;
5599 }
5600
5601 spin_lock_irqsave(&ctx->completion_lock, flags);
5602 ret = io_timeout_cancel(ctx, sqe_addr);
5603 if (ret != -ENOENT)
5604 goto done;
5605 ret = io_poll_cancel(ctx, sqe_addr);
5606done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005607 if (!ret)
5608 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005609 io_cqring_fill_event(req, ret);
5610 io_commit_cqring(ctx);
5611 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5612 io_cqring_ev_posted(ctx);
5613
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005614 if (ret < 0)
5615 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03005616 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005617}
5618
Jens Axboe3529d8c2019-12-19 18:24:38 -07005619static int io_async_cancel_prep(struct io_kiocb *req,
5620 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005621{
Jens Axboefbf23842019-12-17 18:45:56 -07005622 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005623 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005624 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5625 return -EINVAL;
5626 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005627 return -EINVAL;
5628
Jens Axboefbf23842019-12-17 18:45:56 -07005629 req->cancel.addr = READ_ONCE(sqe->addr);
5630 return 0;
5631}
5632
Pavel Begunkov014db002020-03-03 21:33:12 +03005633static int io_async_cancel(struct io_kiocb *req)
Jens Axboefbf23842019-12-17 18:45:56 -07005634{
5635 struct io_ring_ctx *ctx = req->ctx;
Jens Axboefbf23842019-12-17 18:45:56 -07005636
Pavel Begunkov014db002020-03-03 21:33:12 +03005637 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06005638 return 0;
5639}
5640
Jens Axboe05f3fb32019-12-09 11:22:50 -07005641static int io_files_update_prep(struct io_kiocb *req,
5642 const struct io_uring_sqe *sqe)
5643{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005644 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5645 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005646 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5647 return -EINVAL;
5648 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005649 return -EINVAL;
5650
5651 req->files_update.offset = READ_ONCE(sqe->off);
5652 req->files_update.nr_args = READ_ONCE(sqe->len);
5653 if (!req->files_update.nr_args)
5654 return -EINVAL;
5655 req->files_update.arg = READ_ONCE(sqe->addr);
5656 return 0;
5657}
5658
Jens Axboe229a7b62020-06-22 10:13:11 -06005659static int io_files_update(struct io_kiocb *req, bool force_nonblock,
5660 struct io_comp_state *cs)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005661{
5662 struct io_ring_ctx *ctx = req->ctx;
5663 struct io_uring_files_update up;
5664 int ret;
5665
Jens Axboef86cd202020-01-29 13:46:44 -07005666 if (force_nonblock)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005667 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005668
5669 up.offset = req->files_update.offset;
5670 up.fds = req->files_update.arg;
5671
5672 mutex_lock(&ctx->uring_lock);
5673 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
5674 mutex_unlock(&ctx->uring_lock);
5675
5676 if (ret < 0)
5677 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06005678 __io_req_complete(req, ret, 0, cs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005679 return 0;
5680}
5681
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005682static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005683{
Jens Axboed625c6e2019-12-17 19:53:05 -07005684 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005685 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005686 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005687 case IORING_OP_READV:
5688 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005689 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005690 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005691 case IORING_OP_WRITEV:
5692 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005693 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005694 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005695 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005696 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005697 case IORING_OP_POLL_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005698 return io_poll_remove_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005699 case IORING_OP_FSYNC:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005700 return io_prep_fsync(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005701 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005702 return io_prep_sfr(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005703 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005704 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005705 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005706 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005707 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005708 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005709 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005710 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005711 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005712 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005713 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005714 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005715 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005716 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005717 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005718 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005719 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005720 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005721 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005722 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005723 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005724 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005725 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005726 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005727 case IORING_OP_FILES_UPDATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005728 return io_files_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005729 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005730 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005731 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005732 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005733 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005734 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005735 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005736 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005737 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005738 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005739 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005740 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005741 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005742 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005743 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005744 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005745 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005746 return io_tee_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005747 }
5748
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005749 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5750 req->opcode);
5751	return -EINVAL;
5752}
5753
Jens Axboedef596e2019-01-09 08:59:42 -07005754static int io_req_defer_prep(struct io_kiocb *req,
5755 const struct io_uring_sqe *sqe)
Jens Axboedef596e2019-01-09 08:59:42 -07005756{
Jens Axboedef596e2019-01-09 08:59:42 -07005757 if (!sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005758 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005759 if (io_alloc_async_data(req))
Jens Axboeb76da702019-11-20 13:05:32 -07005760 return -EAGAIN;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005761 return io_req_prep(req, sqe);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005762}
5763
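/*
 * Sequence number used for drain ordering: roughly, how many requests were
 * submitted before this one, not counting the request itself or the rest
 * of its link chain.
 */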
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005764static u32 io_get_sequence(struct io_kiocb *req)
5765{
5766 struct io_kiocb *pos;
5767 struct io_ring_ctx *ctx = req->ctx;
5768 u32 total_submitted, nr_reqs = 1;
5769
5770 if (req->flags & REQ_F_LINK_HEAD)
5771 list_for_each_entry(pos, &req->link_list, link_list)
5772 nr_reqs++;
5773
5774 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5775 return total_submitted - nr_reqs;
5776}
5777
Jens Axboe3529d8c2019-12-19 18:24:38 -07005778static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005779{
5780 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005781 struct io_defer_entry *de;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005782 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005783 u32 seq;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005784
5785	/* Still need to defer if there are pending reqs in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005786 if (likely(list_empty_careful(&ctx->defer_list) &&
5787 !(req->flags & REQ_F_IO_DRAIN)))
5788 return 0;
5789
5790 seq = io_get_sequence(req);
5791 /* Still a chance to pass the sequence check */
5792 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboe2b188cc2019-01-07 10:46:33 -07005793 return 0;
5794
Jens Axboee8c2bc12020-08-15 18:44:09 -07005795 if (!req->async_data) {
Pavel Begunkov650b5482020-05-17 14:02:11 +03005796 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03005797 if (ret)
Pavel Begunkov650b5482020-05-17 14:02:11 +03005798 return ret;
5799 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03005800 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005801 de = kmalloc(sizeof(*de), GFP_KERNEL);
5802 if (!de)
5803 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07005804
5805 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005806 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07005807 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005808 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03005809 io_queue_async_work(req);
5810 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07005811 }
5812
5813 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005814 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005815 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005816 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07005817 spin_unlock_irq(&ctx->completion_lock);
5818 return -EIOCBQUEUED;
5819}
Jens Axboeedafcce2019-01-09 09:16:05 -07005820
Jens Axboef573d382020-09-22 10:19:24 -06005821static void io_req_drop_files(struct io_kiocb *req)
5822{
5823 struct io_ring_ctx *ctx = req->ctx;
5824 unsigned long flags;
5825
5826 spin_lock_irqsave(&ctx->inflight_lock, flags);
5827 list_del(&req->inflight_entry);
5828 if (waitqueue_active(&ctx->inflight_wait))
5829 wake_up(&ctx->inflight_wait);
5830 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
5831 req->flags &= ~REQ_F_INFLIGHT;
Jens Axboe98447d62020-10-14 10:48:51 -06005832 put_files_struct(req->work.identity->files);
5833 put_nsproxy(req->work.identity->nsproxy);
Jens Axboedfead8a2020-10-14 10:12:37 -06005834 req->work.flags &= ~IO_WQ_WORK_FILES;
Jens Axboef573d382020-09-22 10:19:24 -06005835}
5836
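/*
 * Release per-opcode resources once a request no longer needs them:
 * selected provided buffers, async iovecs and msghdrs, open filenames,
 * splice input files, and the inflight file tracking.
 */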
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03005837static void __io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005838{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005839 if (req->flags & REQ_F_BUFFER_SELECTED) {
5840 switch (req->opcode) {
5841 case IORING_OP_READV:
5842 case IORING_OP_READ_FIXED:
5843 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07005844 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005845 break;
5846 case IORING_OP_RECVMSG:
5847 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07005848 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005849 break;
5850 }
5851 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005852 }
5853
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005854 if (req->flags & REQ_F_NEED_CLEANUP) {
5855 switch (req->opcode) {
5856 case IORING_OP_READV:
5857 case IORING_OP_READ_FIXED:
5858 case IORING_OP_READ:
5859 case IORING_OP_WRITEV:
5860 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005861 case IORING_OP_WRITE: {
5862 struct io_async_rw *io = req->async_data;
5863 if (io->free_iovec)
5864 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005865 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005866 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005867 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005868 case IORING_OP_SENDMSG: {
5869 struct io_async_msghdr *io = req->async_data;
5870 if (io->iov != io->fast_iov)
5871 kfree(io->iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005872 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005873 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005874 case IORING_OP_SPLICE:
5875 case IORING_OP_TEE:
5876 io_put_file(req, req->splice.file_in,
5877 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5878 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06005879 case IORING_OP_OPENAT:
5880 case IORING_OP_OPENAT2:
5881 if (req->open.filename)
5882 putname(req->open.filename);
5883 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005884 }
5885 req->flags &= ~REQ_F_NEED_CLEANUP;
5886 }
Pavel Begunkovbb175342020-08-20 11:33:35 +03005887
Jens Axboef573d382020-09-22 10:19:24 -06005888 if (req->flags & REQ_F_INFLIGHT)
5889 io_req_drop_files(req);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005890}
5891
Pavel Begunkovc1379e22020-09-30 22:57:56 +03005892static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
5893 struct io_comp_state *cs)
Jens Axboeedafcce2019-01-09 09:16:05 -07005894{
Jens Axboeedafcce2019-01-09 09:16:05 -07005895 struct io_ring_ctx *ctx = req->ctx;
Jens Axboed625c6e2019-12-17 19:53:05 -07005896 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07005897
Jens Axboed625c6e2019-12-17 19:53:05 -07005898 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07005899 case IORING_OP_NOP:
Jens Axboe229a7b62020-06-22 10:13:11 -06005900 ret = io_nop(req, cs);
Jens Axboe31b51512019-01-18 22:56:34 -07005901 break;
5902 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005903 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005904 case IORING_OP_READ:
Jens Axboea1d7c392020-06-22 11:09:46 -06005905 ret = io_read(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005906 break;
5907 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07005908 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005909 case IORING_OP_WRITE:
Jens Axboea1d7c392020-06-22 11:09:46 -06005910 ret = io_write(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005911 break;
5912 case IORING_OP_FSYNC:
Pavel Begunkov014db002020-03-03 21:33:12 +03005913 ret = io_fsync(req, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005914 break;
5915 case IORING_OP_POLL_ADD:
Pavel Begunkov014db002020-03-03 21:33:12 +03005916 ret = io_poll_add(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005917 break;
5918 case IORING_OP_POLL_REMOVE:
Jens Axboeb76da702019-11-20 13:05:32 -07005919 ret = io_poll_remove(req);
5920 break;
5921 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov014db002020-03-03 21:33:12 +03005922 ret = io_sync_file_range(req, force_nonblock);
Jens Axboeb76da702019-11-20 13:05:32 -07005923 break;
5924 case IORING_OP_SENDMSG:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01005925 ret = io_sendmsg(req, force_nonblock, cs);
5926 break;
Jens Axboefddafac2020-01-04 20:19:44 -07005927 case IORING_OP_SEND:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01005928 ret = io_send(req, force_nonblock, cs);
Jens Axboeb76da702019-11-20 13:05:32 -07005929 break;
5930 case IORING_OP_RECVMSG:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01005931 ret = io_recvmsg(req, force_nonblock, cs);
5932 break;
Jens Axboefddafac2020-01-04 20:19:44 -07005933 case IORING_OP_RECV:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01005934 ret = io_recv(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005935 break;
5936 case IORING_OP_TIMEOUT:
5937 ret = io_timeout(req);
5938 break;
5939 case IORING_OP_TIMEOUT_REMOVE:
5940 ret = io_timeout_remove(req);
5941 break;
5942 case IORING_OP_ACCEPT:
Jens Axboe229a7b62020-06-22 10:13:11 -06005943 ret = io_accept(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005944 break;
5945 case IORING_OP_CONNECT:
Jens Axboe229a7b62020-06-22 10:13:11 -06005946 ret = io_connect(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005947 break;
5948 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov014db002020-03-03 21:33:12 +03005949 ret = io_async_cancel(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005950 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07005951 case IORING_OP_FALLOCATE:
Pavel Begunkov014db002020-03-03 21:33:12 +03005952 ret = io_fallocate(req, force_nonblock);
Jens Axboed63d1b52019-12-10 10:38:56 -07005953 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07005954 case IORING_OP_OPENAT:
Pavel Begunkov014db002020-03-03 21:33:12 +03005955 ret = io_openat(req, force_nonblock);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005956 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07005957 case IORING_OP_CLOSE:
Jens Axboe229a7b62020-06-22 10:13:11 -06005958 ret = io_close(req, force_nonblock, cs);
Jens Axboeb5dba592019-12-11 14:02:38 -07005959 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005960 case IORING_OP_FILES_UPDATE:
Jens Axboe229a7b62020-06-22 10:13:11 -06005961 ret = io_files_update(req, force_nonblock, cs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005962 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005963 case IORING_OP_STATX:
Pavel Begunkov014db002020-03-03 21:33:12 +03005964 ret = io_statx(req, force_nonblock);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005965 break;
Jens Axboe4840e412019-12-25 22:03:45 -07005966 case IORING_OP_FADVISE:
Pavel Begunkov014db002020-03-03 21:33:12 +03005967 ret = io_fadvise(req, force_nonblock);
Jens Axboe4840e412019-12-25 22:03:45 -07005968 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07005969 case IORING_OP_MADVISE:
Pavel Begunkov014db002020-03-03 21:33:12 +03005970 ret = io_madvise(req, force_nonblock);
Jens Axboec1ca7572019-12-25 22:18:28 -07005971 break;
Jens Axboecebdb982020-01-08 17:59:24 -07005972 case IORING_OP_OPENAT2:
Pavel Begunkov014db002020-03-03 21:33:12 +03005973 ret = io_openat2(req, force_nonblock);
Jens Axboecebdb982020-01-08 17:59:24 -07005974 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07005975 case IORING_OP_EPOLL_CTL:
Jens Axboe229a7b62020-06-22 10:13:11 -06005976 ret = io_epoll_ctl(req, force_nonblock, cs);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005977 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005978 case IORING_OP_SPLICE:
Pavel Begunkov014db002020-03-03 21:33:12 +03005979 ret = io_splice(req, force_nonblock);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005980 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07005981 case IORING_OP_PROVIDE_BUFFERS:
Jens Axboe229a7b62020-06-22 10:13:11 -06005982 ret = io_provide_buffers(req, force_nonblock, cs);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005983 break;
Jens Axboe067524e2020-03-02 16:32:28 -07005984 case IORING_OP_REMOVE_BUFFERS:
Jens Axboe229a7b62020-06-22 10:13:11 -06005985 ret = io_remove_buffers(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005986 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005987 case IORING_OP_TEE:
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005988 ret = io_tee(req, force_nonblock);
5989 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005990 default:
5991 ret = -EINVAL;
5992 break;
Jens Axboe31b51512019-01-18 22:56:34 -07005993 }
5994
5995 if (ret)
Jens Axboeedafcce2019-01-09 09:16:05 -07005996 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005997
Jens Axboeb5325762020-05-19 21:20:27 -06005998 /* If the op doesn't have a file, we're not polling for it */
5999 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006000 const bool in_async = io_wq_current_is_worker();
6001
Jens Axboe11ba8202020-01-15 21:51:17 -07006002 /* workqueue context doesn't hold uring_lock, grab it now */
6003 if (in_async)
6004 mutex_lock(&ctx->uring_lock);
6005
Jens Axboe2b188cc2019-01-07 10:46:33 -07006006 io_iopoll_req_issued(req);
Jens Axboe11ba8202020-01-15 21:51:17 -07006007
6008 if (in_async)
6009 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006010 }
6011
6012 return 0;
6013}
6014
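/*
 * io-wq worker entry point: arm any linked timeout, then issue the request
 * synchronously (force_nonblock == false), retrying on -EAGAIN for polled
 * IO since the worker cannot wait for block-side request slots.
 */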
Pavel Begunkovf4db7182020-06-25 18:20:54 +03006015static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006016{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006017 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006018 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006019 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006020
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006021 timeout = io_prep_linked_timeout(req);
6022 if (timeout)
6023 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006024
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07006025 /* if NO_CANCEL is set, we must still run the work */
6026 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
6027 IO_WQ_WORK_CANCEL) {
Jens Axboe561fb042019-10-24 07:25:42 -06006028 ret = -ECANCELED;
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07006029 }
Jens Axboe31b51512019-01-18 22:56:34 -07006030
Jens Axboe561fb042019-10-24 07:25:42 -06006031 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006032 do {
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006033 ret = io_issue_sqe(req, false, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06006034 /*
6035 * We can get EAGAIN for polled IO even though we're
6036 * forcing a sync submission from here, since we can't
6037 * wait for request slots on the block side.
6038 */
6039 if (ret != -EAGAIN)
6040 break;
6041 cond_resched();
6042 } while (1);
6043 }
Jens Axboe31b51512019-01-18 22:56:34 -07006044
Jens Axboe561fb042019-10-24 07:25:42 -06006045 if (ret) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006046 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06006047 io_req_complete(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006048 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006049
Pavel Begunkovf4db7182020-06-25 18:20:54 +03006050 return io_steal_work(req);
Jens Axboe31b51512019-01-18 22:56:34 -07006051}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006052
Jens Axboe65e19f52019-10-26 07:20:21 -06006053static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6054 int index)
Jens Axboe09bb8392019-03-13 12:39:28 -06006055{
Jens Axboe65e19f52019-10-26 07:20:21 -06006056 struct fixed_file_table *table;
6057
Jens Axboe05f3fb32019-12-09 11:22:50 -07006058 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
Xiaoming Ni84695082020-05-11 19:25:43 +08006059 return table->files[index & IORING_FILE_TABLE_MASK];
Jens Axboe65e19f52019-10-26 07:20:21 -06006060}
6061
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006062static struct file *io_file_get(struct io_submit_state *state,
6063 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006064{
6065 struct io_ring_ctx *ctx = req->ctx;
6066 struct file *file;
6067
6068 if (fixed) {
Pavel Begunkov479f5172020-10-10 18:34:07 +01006069 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006070 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006071 fd = array_index_nospec(fd, ctx->nr_user_files);
6072 file = io_file_from_index(ctx, fd);
Jens Axboefd2206e2020-06-02 16:40:47 -06006073 if (file) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01006074 req->fixed_file_refs = &ctx->file_data->node->refs;
Jens Axboefd2206e2020-06-02 16:40:47 -06006075 percpu_ref_get(req->fixed_file_refs);
6076 }
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006077 } else {
6078 trace_io_uring_file_get(ctx, fd);
6079 file = __io_file_get(state, fd);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006080 }
6081
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006082 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006083}
6084
Jens Axboe3529d8c2019-12-19 18:24:38 -07006085static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
Jens Axboe63ff8222020-05-07 14:56:15 -06006086 int fd)
Jens Axboe09bb8392019-03-13 12:39:28 -06006087{
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006088 bool fixed;
Jens Axboe09bb8392019-03-13 12:39:28 -06006089
Jens Axboe63ff8222020-05-07 14:56:15 -06006090 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03006091 if (unlikely(!fixed && io_async_submit(req->ctx)))
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006092 return -EBADF;
Jens Axboe09bb8392019-03-13 12:39:28 -06006093
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006094 req->file = io_file_get(state, req, fd, fixed);
6095 if (req->file || io_op_defs[req->opcode].needs_file_no_error)
Jens Axboef86cd202020-01-29 13:46:44 -07006096 return 0;
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006097 return -EBADF;
Pavel Begunkovf56040b2020-07-23 20:25:21 +03006098}
6099
Jens Axboe2665abf2019-11-05 12:40:47 -07006100static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6101{
Jens Axboead8a48a2019-11-15 08:49:11 -07006102 struct io_timeout_data *data = container_of(timer,
6103 struct io_timeout_data, timer);
6104 struct io_kiocb *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006105 struct io_ring_ctx *ctx = req->ctx;
6106 struct io_kiocb *prev = NULL;
6107 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006108
6109 spin_lock_irqsave(&ctx->completion_lock, flags);
6110
6111 /*
6112	 * We don't expect the list to be empty; that will only happen if we
6113 * race with the completion of the linked work.
6114 */
Pavel Begunkov44932332019-12-05 16:16:35 +03006115 if (!list_empty(&req->link_list)) {
6116 prev = list_entry(req->link_list.prev, struct io_kiocb,
6117 link_list);
Jens Axboe5d960722019-11-19 15:31:28 -07006118 if (refcount_inc_not_zero(&prev->refs)) {
Pavel Begunkov44932332019-12-05 16:16:35 +03006119 list_del_init(&req->link_list);
Jens Axboe5d960722019-11-19 15:31:28 -07006120 prev->flags &= ~REQ_F_LINK_TIMEOUT;
6121 } else
Jens Axboe76a46e02019-11-10 23:34:16 -07006122 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006123 }
6124
6125 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6126
6127 if (prev) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006128 req_set_fail_links(prev);
Pavel Begunkov014db002020-03-03 21:33:12 +03006129 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Jens Axboe76a46e02019-11-10 23:34:16 -07006130 io_put_req(prev);
Jens Axboe47f46762019-11-09 17:43:02 -07006131 } else {
Jens Axboee1e16092020-06-22 09:17:17 -06006132 io_req_complete(req, -ETIME);
Jens Axboe2665abf2019-11-05 12:40:47 -07006133 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006134 return HRTIMER_NORESTART;
6135}
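/*
 * Race summary for the link timeout: if the timer fires while the linked
 * request is still on the link list, that request is marked failed and
 * hunted down via io_async_find_and_cancel() with -ETIME; if the linked
 * request already completed and emptied the list first, the timeout
 * request itself simply completes with -ETIME.
 */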
6136
Jens Axboe7271ef32020-08-10 09:55:22 -06006137static void __io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006138{
Jens Axboe76a46e02019-11-10 23:34:16 -07006139 /*
6140 * If the list is now empty, then our linked request finished before
6141	 * we got a chance to set up the timer.
6142 */
Pavel Begunkov44932332019-12-05 16:16:35 +03006143 if (!list_empty(&req->link_list)) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006144 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006145
Jens Axboead8a48a2019-11-15 08:49:11 -07006146 data->timer.function = io_link_timeout_fn;
6147 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6148 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006149 }
Jens Axboe7271ef32020-08-10 09:55:22 -06006150}
6151
6152static void io_queue_linked_timeout(struct io_kiocb *req)
6153{
6154 struct io_ring_ctx *ctx = req->ctx;
6155
6156 spin_lock_irq(&ctx->completion_lock);
6157 __io_queue_linked_timeout(req);
Jens Axboe76a46e02019-11-10 23:34:16 -07006158 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006159
Jens Axboe2665abf2019-11-05 12:40:47 -07006160 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006161 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006162}
6163
Jens Axboead8a48a2019-11-15 08:49:11 -07006164static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006165{
6166 struct io_kiocb *nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006167
Pavel Begunkovdea3b492020-04-12 02:05:04 +03006168 if (!(req->flags & REQ_F_LINK_HEAD))
Jens Axboe2665abf2019-11-05 12:40:47 -07006169 return NULL;
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006170 if (req->flags & REQ_F_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006171 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006172
Pavel Begunkov44932332019-12-05 16:16:35 +03006173 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
6174 link_list);
Jens Axboed625c6e2019-12-17 19:53:05 -07006175 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboe76a46e02019-11-10 23:34:16 -07006176 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006177
Jens Axboe76a46e02019-11-10 23:34:16 -07006178 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006179 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006180}
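/*
 * io_prep_linked_timeout() only *selects* the IORING_OP_LINK_TIMEOUT
 * request that immediately follows a link head; the timer itself is
 * armed later via io_queue_linked_timeout(), once the head has actually
 * been issued.
 */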
6181
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006182static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006183{
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006184 struct io_kiocb *linked_timeout;
Pavel Begunkov4bc44942020-02-29 22:48:24 +03006185 struct io_kiocb *nxt;
Jens Axboe193155c2020-02-22 23:22:19 -07006186 const struct cred *old_creds = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006187 int ret;
6188
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006189again:
6190 linked_timeout = io_prep_linked_timeout(req);
6191
Pavel Begunkov2e5aa6c2020-10-18 10:17:37 +01006192 if ((req->flags & REQ_F_WORK_INITIALIZED) &&
6193 (req->work.flags & IO_WQ_WORK_CREDS) &&
Jens Axboe98447d62020-10-14 10:48:51 -06006194 req->work.identity->creds != current_cred()) {
Jens Axboe193155c2020-02-22 23:22:19 -07006195 if (old_creds)
6196 revert_creds(old_creds);
Jens Axboe98447d62020-10-14 10:48:51 -06006197 if (old_creds == req->work.identity->creds)
Jens Axboe193155c2020-02-22 23:22:19 -07006198 old_creds = NULL; /* restored original creds */
6199 else
Jens Axboe98447d62020-10-14 10:48:51 -06006200 old_creds = override_creds(req->work.identity->creds);
Jens Axboe193155c2020-02-22 23:22:19 -07006201 }
6202
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006203 ret = io_issue_sqe(req, true, cs);
Jens Axboe491381ce2019-10-17 09:20:46 -06006204
6205 /*
6206 * We async punt it if the file wasn't marked NOWAIT, or if the file
6207 * doesn't support non-blocking read/write attempts
6208 */
Pavel Begunkov24c74672020-06-21 13:09:51 +03006209 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006210 if (!io_arm_poll_handler(req)) {
Pavel Begunkov86a761f2020-01-22 23:09:36 +03006211punt:
Pavel Begunkovf063c542020-07-25 14:41:59 +03006212 /*
6213 * Queued up for async execution, worker will release
6214 * submit reference when the iocb is actually submitted.
6215 */
6216 io_queue_async_work(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006217 }
Pavel Begunkovbbad27b2019-11-19 23:32:47 +03006218
Pavel Begunkovf063c542020-07-25 14:41:59 +03006219 if (linked_timeout)
6220 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov4bc44942020-02-29 22:48:24 +03006221 goto exit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006222 }
Jens Axboee65ef562019-03-12 10:16:44 -06006223
Pavel Begunkov652532a2020-07-03 22:15:07 +03006224 if (unlikely(ret)) {
Pavel Begunkov652532a2020-07-03 22:15:07 +03006225 /* un-prep timeout, so it'll be killed as any other linked */
6226 req->flags &= ~REQ_F_LINK_TIMEOUT;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006227 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06006228 io_put_req(req);
Pavel Begunkov652532a2020-07-03 22:15:07 +03006229 io_req_complete(req, ret);
6230 goto exit;
Jens Axboe9e645e112019-05-10 16:07:28 -06006231 }
Pavel Begunkov652532a2020-07-03 22:15:07 +03006232
Jens Axboe6c271ce2019-01-10 11:22:30 -07006233 /* drop submission reference */
Pavel Begunkov9b5f7bd92020-06-29 13:13:00 +03006234 nxt = io_put_req_find_next(req);
Pavel Begunkov652532a2020-07-03 22:15:07 +03006235 if (linked_timeout)
6236 io_queue_linked_timeout(linked_timeout);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006237
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006238 if (nxt) {
6239 req = nxt;
Pavel Begunkov86a761f2020-01-22 23:09:36 +03006240
6241 if (req->flags & REQ_F_FORCE_ASYNC)
6242 goto punt;
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006243 goto again;
6244 }
Pavel Begunkov4bc44942020-02-29 22:48:24 +03006245exit:
Jens Axboe193155c2020-02-22 23:22:19 -07006246 if (old_creds)
6247 revert_creds(old_creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006248}
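/*
 * Rough flow of __io_queue_sqe(), for orientation:
 *
 *   1. pick out a pending linked timeout, if any (armed further down)
 *   2. switch to per-request credentials if they differ from current
 *   3. try to issue the request inline, non-blocking
 *   4. on -EAGAIN, arm a poll handler or punt to the io-wq workers
 *   5. otherwise complete/fail the request, then continue with the next
 *      request in the chain (honouring REQ_F_FORCE_ASYNC via the punt
 *      label)
 */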
6249
Jens Axboef13fad72020-06-22 09:34:30 -06006250static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6251 struct io_comp_state *cs)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006252{
6253 int ret;
6254
Jens Axboe3529d8c2019-12-19 18:24:38 -07006255 ret = io_req_defer(req, sqe);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006256 if (ret) {
6257 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006258fail_req:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006259 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06006260 io_put_req(req);
6261 io_req_complete(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006262 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006263 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006264 if (!req->async_data) {
Pavel Begunkovbd2ab182020-05-17 14:02:12 +03006265 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006266 if (unlikely(ret))
Pavel Begunkovbd2ab182020-05-17 14:02:12 +03006267 goto fail_req;
6268 }
6269
Jens Axboece35a472019-12-17 08:04:44 -07006270 /*
6271	 * Never try inline submit if IOSQE_ASYNC is set, go straight
6272 * to async execution.
6273 */
Pavel Begunkov3e863ea2020-07-23 20:17:20 +03006274 io_req_init_async(req);
Jens Axboece35a472019-12-17 08:04:44 -07006275 req->work.flags |= IO_WQ_WORK_CONCURRENT;
6276 io_queue_async_work(req);
6277 } else {
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006278 if (sqe) {
6279 ret = io_req_prep(req, sqe);
6280 if (unlikely(ret))
6281 goto fail_req;
6282 }
6283 __io_queue_sqe(req, cs);
Jens Axboece35a472019-12-17 08:04:44 -07006284 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006285}
6286
Jens Axboef13fad72020-06-22 09:34:30 -06006287static inline void io_queue_link_head(struct io_kiocb *req,
6288 struct io_comp_state *cs)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006289{
Jens Axboe94ae5e72019-11-14 19:39:52 -07006290 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
Jens Axboee1e16092020-06-22 09:17:17 -06006291 io_put_req(req);
6292 io_req_complete(req, -ECANCELED);
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006293 } else
Jens Axboef13fad72020-06-22 09:34:30 -06006294 io_queue_sqe(req, NULL, cs);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006295}
6296
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006297static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboef13fad72020-06-22 09:34:30 -06006298 struct io_kiocb **link, struct io_comp_state *cs)
Jens Axboe9e645e112019-05-10 16:07:28 -06006299{
Jackie Liua197f662019-11-08 08:09:12 -07006300 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006301 int ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06006302
Jens Axboe9e645e112019-05-10 16:07:28 -06006303 /*
6304 * If we already have a head request, queue this one for async
6305 * submittal once the head completes. If we don't have a head but
6306 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6307 * submitted sync once the chain is complete. If none of those
6308 * conditions are true (normal request), then just queue it.
6309 */
6310 if (*link) {
Pavel Begunkov9d763772019-12-17 02:22:07 +03006311 struct io_kiocb *head = *link;
Jens Axboe9e645e112019-05-10 16:07:28 -06006312
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006313 /*
6314	 * Since a link executes sequentially, draining both sides
6315	 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6316	 * requests in the link. So it drains the head and the
6317	 * request following the link. The latter is done via the
6318	 * drain_next flag to persist the effect across calls.
6319 */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006320 if (req->flags & REQ_F_IO_DRAIN) {
Pavel Begunkov711be032020-01-17 03:57:59 +03006321 head->flags |= REQ_F_IO_DRAIN;
6322 ctx->drain_next = 1;
6323 }
Jens Axboe3529d8c2019-12-19 18:24:38 -07006324 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006325 if (unlikely(ret)) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006326 /* fail even hard links since we don't submit */
Pavel Begunkov9d763772019-12-17 02:22:07 +03006327 head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006328 return ret;
Jens Axboe2d283902019-12-04 11:08:05 -07006329 }
Pavel Begunkov9d763772019-12-17 02:22:07 +03006330 trace_io_uring_link(ctx, req, head);
6331 list_add_tail(&req->link_list, &head->link_list);
Jens Axboe9e645e112019-05-10 16:07:28 -06006332
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006333 /* last request of a link, enqueue the link */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006334 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Jens Axboef13fad72020-06-22 09:34:30 -06006335 io_queue_link_head(head, cs);
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006336 *link = NULL;
6337 }
Jens Axboe9e645e112019-05-10 16:07:28 -06006338 } else {
Pavel Begunkov711be032020-01-17 03:57:59 +03006339 if (unlikely(ctx->drain_next)) {
6340 req->flags |= REQ_F_IO_DRAIN;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006341 ctx->drain_next = 0;
Pavel Begunkov711be032020-01-17 03:57:59 +03006342 }
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006343 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkovdea3b492020-04-12 02:05:04 +03006344 req->flags |= REQ_F_LINK_HEAD;
Pavel Begunkov711be032020-01-17 03:57:59 +03006345 INIT_LIST_HEAD(&req->link_list);
Pavel Begunkovf1d96a82020-03-13 22:29:14 +03006346
Pavel Begunkov711be032020-01-17 03:57:59 +03006347 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006348 if (unlikely(ret))
Pavel Begunkov711be032020-01-17 03:57:59 +03006349 req->flags |= REQ_F_FAIL_LINK;
6350 *link = req;
6351 } else {
Jens Axboef13fad72020-06-22 09:34:30 -06006352 io_queue_sqe(req, sqe, cs);
Pavel Begunkov711be032020-01-17 03:57:59 +03006353 }
Jens Axboe9e645e112019-05-10 16:07:28 -06006354 }
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006355
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006356 return 0;
Jens Axboe9e645e112019-05-10 16:07:28 -06006357}
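/*
 * Illustrative userspace view of the above (not part of this file): a
 * chain such as
 *
 *	sqe[0].flags = IOSQE_IO_LINK;	// e.g. a read
 *	sqe[1].flags = IOSQE_IO_LINK;	// e.g. a write depending on it
 *	sqe[2].flags = 0;		// e.g. a final fsync, ends the chain
 *
 * is collected here via *link/link_list and only enqueued as a unit once
 * a request without IOSQE_IO_LINK/IOSQE_IO_HARDLINK is seen.
 */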
6358
Jens Axboe9a56a232019-01-09 09:06:50 -07006359/*
6360 * Batched submission is done, ensure local IO is flushed out.
6361 */
6362static void io_submit_state_end(struct io_submit_state *state)
6363{
Jens Axboef13fad72020-06-22 09:34:30 -06006364 if (!list_empty(&state->comp.list))
6365 io_submit_flush_completions(&state->comp);
Jens Axboe9a56a232019-01-09 09:06:50 -07006366 blk_finish_plug(&state->plug);
Pavel Begunkov9f13c352020-05-17 14:13:41 +03006367 io_state_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07006368 if (state->free_reqs)
Pavel Begunkov6c8a3132020-02-01 03:58:00 +03006369 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
Jens Axboe9a56a232019-01-09 09:06:50 -07006370}
6371
6372/*
6373 * Start submission side cache.
6374 */
6375static void io_submit_state_start(struct io_submit_state *state,
Jens Axboe013538b2020-06-22 09:29:15 -06006376 struct io_ring_ctx *ctx, unsigned int max_ios)
Jens Axboe9a56a232019-01-09 09:06:50 -07006377{
6378 blk_start_plug(&state->plug);
Jens Axboe013538b2020-06-22 09:29:15 -06006379 state->comp.nr = 0;
6380 INIT_LIST_HEAD(&state->comp.list);
6381 state->comp.ctx = ctx;
Jens Axboe2579f912019-01-09 09:10:43 -07006382 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07006383 state->file = NULL;
6384 state->ios_left = max_ios;
6385}
6386
Jens Axboe2b188cc2019-01-07 10:46:33 -07006387static void io_commit_sqring(struct io_ring_ctx *ctx)
6388{
Hristo Venev75b28af2019-08-26 17:23:46 +00006389 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006390
Pavel Begunkovcaf582c2019-12-30 21:24:46 +03006391 /*
6392 * Ensure any loads from the SQEs are done at this point,
6393 * since once we write the new head, the application could
6394 * write new data to them.
6395 */
6396 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006397}
6398
6399/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006400 * Fetch an sqe, if one is available. Note that the returned sqe points to memory
Jens Axboe2b188cc2019-01-07 10:46:33 -07006401 * that is mapped by userspace. This means that care needs to be taken to
6402 * ensure that reads are stable, as we cannot rely on userspace always
6403 * being a good citizen. If members of the sqe are validated and then later
6404 * used, it's important that those reads are done through READ_ONCE() to
6405 * prevent a re-load down the line.
6406 */
Pavel Begunkov709b3022020-04-08 08:58:43 +03006407static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006408{
Hristo Venev75b28af2019-08-26 17:23:46 +00006409 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006410 unsigned head;
6411
6412 /*
6413 * The cached sq head (or cq tail) serves two purposes:
6414 *
6415 * 1) allows us to batch the cost of updating the user visible
6416 * head updates.
6417	 *    head.
6418 * though the application is the one updating it.
6419 */
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006420 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
Pavel Begunkov709b3022020-04-08 08:58:43 +03006421 if (likely(head < ctx->sq_entries))
6422 return &ctx->sq_sqes[head];
Jens Axboe2b188cc2019-01-07 10:46:33 -07006423
6424 /* drop invalid entries */
Jens Axboe498ccd92019-10-25 10:04:25 -06006425 ctx->cached_sq_dropped++;
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006426 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
Pavel Begunkov709b3022020-04-08 08:58:43 +03006427 return NULL;
6428}
6429
6430static inline void io_consume_sqe(struct io_ring_ctx *ctx)
6431{
6432 ctx->cached_sq_head++;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006433}
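/*
 * For reference, the userspace counterpart (roughly what liburing does,
 * illustrative only) is the mirror image of io_get_sqe()/io_consume_sqe():
 *
 *	unsigned tail = *sq.ktail;			// private copy
 *	struct io_uring_sqe *sqe = &sq.sqes[tail & *sq.kring_mask];
 *	// ... fill in the sqe ...
 *	sq.array[tail & *sq.kring_mask] = tail & *sq.kring_mask;
 *	smp_store_release(sq.ktail, tail + 1);		// publish to kernel
 *
 * i.e. the application fills an SQE slot, records its index in the
 * indirection array, and only then advances the tail with release
 * semantics so the loads above observe a fully written entry.
 */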
6434
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006435/*
6436 * Check SQE restrictions (opcode and flags).
6437 *
6438 * Returns 'true' if SQE is allowed, 'false' otherwise.
6439 */
6440static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6441 struct io_kiocb *req,
6442 unsigned int sqe_flags)
6443{
6444 if (!ctx->restricted)
6445 return true;
6446
6447 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6448 return false;
6449
6450 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6451 ctx->restrictions.sqe_flags_required)
6452 return false;
6453
6454 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6455 ctx->restrictions.sqe_flags_required))
6456 return false;
6457
6458 return true;
6459}
6460
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006461#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
6462 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6463 IOSQE_BUFFER_SELECT)
6464
6465static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6466 const struct io_uring_sqe *sqe,
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03006467 struct io_submit_state *state)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006468{
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006469 unsigned int sqe_flags;
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006470 int id, ret;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006471
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006472 req->opcode = READ_ONCE(sqe->opcode);
6473 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006474 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006475 req->file = NULL;
6476 req->ctx = ctx;
6477 req->flags = 0;
6478 /* one is dropped after submission, the other at completion */
6479 refcount_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006480 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006481 req->result = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006482
6483 if (unlikely(req->opcode >= IORING_OP_LAST))
6484 return -EINVAL;
6485
Jens Axboe9d8426a2020-06-16 18:42:49 -06006486 if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
6487 return -EFAULT;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006488
6489 sqe_flags = READ_ONCE(sqe->flags);
6490 /* enforce forwards compatibility on users */
6491 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6492 return -EINVAL;
6493
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006494 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6495 return -EACCES;
6496
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006497 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6498 !io_op_defs[req->opcode].buffer_select)
6499 return -EOPNOTSUPP;
6500
6501 id = READ_ONCE(sqe->personality);
6502 if (id) {
Jens Axboe1e6fa522020-10-15 08:46:24 -06006503 struct io_identity *iod;
6504
Jens Axboe1e6fa522020-10-15 08:46:24 -06006505 iod = idr_find(&ctx->personality_idr, id);
6506 if (unlikely(!iod))
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006507 return -EINVAL;
Jens Axboe1e6fa522020-10-15 08:46:24 -06006508 refcount_inc(&iod->count);
Pavel Begunkovec99ca62020-10-18 10:17:38 +01006509
6510 __io_req_init_async(req);
Jens Axboe1e6fa522020-10-15 08:46:24 -06006511 get_cred(iod->creds);
6512 req->work.identity = iod;
Jens Axboedfead8a2020-10-14 10:12:37 -06006513 req->work.flags |= IO_WQ_WORK_CREDS;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006514 }
6515
6516 /* same numerical values with corresponding REQ_F_*, safe to copy */
Pavel Begunkovc11368a52020-05-17 14:13:42 +03006517 req->flags |= sqe_flags;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006518
Jens Axboe63ff8222020-05-07 14:56:15 -06006519 if (!io_op_defs[req->opcode].needs_file)
6520 return 0;
6521
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006522 ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
6523 state->ios_left--;
6524 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006525}
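/*
 * Note: io_init_req() deliberately does only the cheap, always-needed
 * setup (opcode/user_data/flags validation, personality lookup, file
 * resolution). Opcode-specific preparation is deferred to
 * io_req_prep()/io_req_defer_prep() so that invalid SQEs fail early and
 * the common path stays slim.
 */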
6526
Jens Axboe0f212202020-09-13 13:09:39 -06006527static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006528{
Jens Axboeac8691c2020-06-01 08:30:41 -06006529 struct io_submit_state state;
Jens Axboe9e645e112019-05-10 16:07:28 -06006530 struct io_kiocb *link = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06006531 int i, submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006532
Jens Axboec4a2ed72019-11-21 21:01:26 -07006533 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006534 if (test_bit(0, &ctx->sq_check_overflow)) {
6535 if (!list_empty(&ctx->cq_overflow_list) &&
Jens Axboee6c8aa92020-09-28 13:10:13 -06006536 !io_cqring_overflow_flush(ctx, false, NULL, NULL))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006537 return -EBUSY;
6538 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006539
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006540 /* make sure SQ entry isn't read before tail */
6541 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006542
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006543 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6544 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006545
Jens Axboed8a6df12020-10-15 16:24:45 -06006546 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006547 refcount_add(nr, &current->usage);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006548
Jens Axboe6c271ce2019-01-10 11:22:30 -07006549 io_submit_state_start(&state, ctx, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006550
Jens Axboe6c271ce2019-01-10 11:22:30 -07006551 for (i = 0; i < nr; i++) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006552 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006553 struct io_kiocb *req;
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03006554 int err;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006555
Pavel Begunkovb1e50e52020-04-08 08:58:44 +03006556 sqe = io_get_sqe(ctx);
6557 if (unlikely(!sqe)) {
6558 io_consume_sqe(ctx);
6559 break;
6560 }
Jens Axboeac8691c2020-06-01 08:30:41 -06006561 req = io_alloc_req(ctx, &state);
Pavel Begunkov196be952019-11-07 01:41:06 +03006562 if (unlikely(!req)) {
6563 if (!submitted)
6564 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006565 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006566 }
Pavel Begunkov709b3022020-04-08 08:58:43 +03006567 io_consume_sqe(ctx);
Jens Axboed3656342019-12-18 09:50:26 -07006568 /* will complete beyond this point, count as submitted */
6569 submitted++;
6570
Pavel Begunkov692d8362020-10-10 18:34:13 +01006571 err = io_init_req(ctx, req, sqe, &state);
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006572 if (unlikely(err)) {
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03006573fail_req:
Jens Axboee1e16092020-06-22 09:17:17 -06006574 io_put_req(req);
6575 io_req_complete(req, err);
Jens Axboed3656342019-12-18 09:50:26 -07006576 break;
6577 }
6578
Jens Axboe354420f2020-01-08 18:55:15 -07006579 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03006580 true, io_async_submit(ctx));
Jens Axboef13fad72020-06-22 09:34:30 -06006581 err = io_submit_sqe(req, sqe, &link, &state.comp);
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006582 if (err)
6583 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006584 }
6585
Pavel Begunkov9466f432020-01-25 22:34:01 +03006586 if (unlikely(submitted != nr)) {
6587 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006588 struct io_uring_task *tctx = current->io_uring;
6589 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006590
Jens Axboed8a6df12020-10-15 16:24:45 -06006591 percpu_ref_put_many(&ctx->refs, unused);
6592 percpu_counter_sub(&tctx->inflight, unused);
6593 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006594 }
Jens Axboe9e645e112019-05-10 16:07:28 -06006595 if (link)
Jens Axboef13fad72020-06-22 09:34:30 -06006596 io_queue_link_head(link, &state.comp);
Jens Axboeac8691c2020-06-01 08:30:41 -06006597 io_submit_state_end(&state);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006598
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006599 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6600 io_commit_sqring(ctx);
6601
Jens Axboe6c271ce2019-01-10 11:22:30 -07006602 return submitted;
6603}
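/*
 * Submission batching summary: io_submit_sqes() takes nr ctx references
 * and inflight counts up front, walks the SQ ring once, and on partial
 * failure returns the unused references/counters before committing the
 * new SQ head. The per-call io_submit_state batches file gets, request
 * allocation and completion flushing across all nr entries.
 */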
6604
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006605static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6606{
6607 /* Tell userspace we may need a wakeup call */
6608 spin_lock_irq(&ctx->completion_lock);
6609 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6610 spin_unlock_irq(&ctx->completion_lock);
6611}
6612
6613static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6614{
6615 spin_lock_irq(&ctx->completion_lock);
6616 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6617 spin_unlock_irq(&ctx->completion_lock);
6618}
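/*
 * Userspace side of this flag (illustrative, roughly what liburing does):
 * after bumping the SQ tail the application re-checks the flags and kicks
 * the SQPOLL thread if needed:
 *
 *	if (READ_ONCE(*sq.kflags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 *
 * otherwise submissions can sit in the ring until the idle SQPOLL thread
 * is woken by some other means.
 */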
6619
Jens Axboe3f0e64d2020-09-02 12:42:47 -06006620static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode,
6621 int sync, void *key)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006622{
Jens Axboe3f0e64d2020-09-02 12:42:47 -06006623 struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry);
6624 int ret;
6625
6626 ret = autoremove_wake_function(wqe, mode, sync, key);
6627 if (ret) {
6628 unsigned long flags;
6629
6630 spin_lock_irqsave(&ctx->completion_lock, flags);
6631 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6632 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6633 }
6634 return ret;
6635}
6636
Jens Axboec8d1ba52020-09-14 11:07:26 -06006637enum sq_ret {
6638 SQT_IDLE = 1,
6639 SQT_SPIN = 2,
6640 SQT_DID_WORK = 4,
6641};
6642
6643static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
Jens Axboee95eee22020-09-08 09:11:32 -06006644 unsigned long start_jiffies, bool cap_entries)
Jens Axboec8d1ba52020-09-14 11:07:26 -06006645{
6646 unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
Jens Axboe534ca6d2020-09-02 13:52:19 -06006647 struct io_sq_data *sqd = ctx->sq_data;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006648 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006649 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006650
Jens Axboec8d1ba52020-09-14 11:07:26 -06006651again:
6652 if (!list_empty(&ctx->iopoll_list)) {
6653 unsigned nr_events = 0;
Jackie Liua4c0b3d2019-07-08 13:41:12 +08006654
Jens Axboec8d1ba52020-09-14 11:07:26 -06006655 mutex_lock(&ctx->uring_lock);
6656 if (!list_empty(&ctx->iopoll_list) && !need_resched())
6657 io_do_iopoll(ctx, &nr_events, 0);
6658 mutex_unlock(&ctx->uring_lock);
6659 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006660
Jens Axboec8d1ba52020-09-14 11:07:26 -06006661 to_submit = io_sqring_entries(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006662
Jens Axboec8d1ba52020-09-14 11:07:26 -06006663 /*
6664 * If submit got -EBUSY, flag us as needing the application
6665 * to enter the kernel to reap and flush events.
6666 */
6667 if (!to_submit || ret == -EBUSY || need_resched()) {
6668 /*
6669	 * Drop cur_mm before scheduling; we can't hold it for
6670 * long periods (or over schedule()). Do this before
6671 * adding ourselves to the waitqueue, as the unuse/drop
6672 * may sleep.
6673 */
6674 io_sq_thread_drop_mm();
Jens Axboe6c271ce2019-01-10 11:22:30 -07006675
Jens Axboec8d1ba52020-09-14 11:07:26 -06006676 /*
6677 * We're polling. If we're within the defined idle
6678 * period, then let us spin without work before going
6679	 * to sleep. The exception is if we got EBUSY doing
6680	 * more IO; in that case we should wait for the application to
6681 * reap events and wake us up.
6682 */
6683 if (!list_empty(&ctx->iopoll_list) || need_resched() ||
6684 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6685 !percpu_ref_is_dying(&ctx->refs)))
6686 return SQT_SPIN;
6687
Jens Axboe534ca6d2020-09-02 13:52:19 -06006688 prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry,
Jens Axboec8d1ba52020-09-14 11:07:26 -06006689 TASK_INTERRUPTIBLE);
6690
6691 /*
6692 * While doing polled IO, before going to sleep, we need
6693	 * to check if there are new reqs added to iopoll_list;
6694	 * reqs may have been punted to an io worker and will be
6695	 * added to iopoll_list later, hence check
6696 * the iopoll_list again.
6697 */
6698 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6699 !list_empty_careful(&ctx->iopoll_list)) {
Jens Axboe534ca6d2020-09-02 13:52:19 -06006700 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
Jens Axboec8d1ba52020-09-14 11:07:26 -06006701 goto again;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006702 }
6703
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006704 to_submit = io_sqring_entries(ctx);
Jens Axboec8d1ba52020-09-14 11:07:26 -06006705 if (!to_submit || ret == -EBUSY)
6706 return SQT_IDLE;
6707 }
6708
Jens Axboe534ca6d2020-09-02 13:52:19 -06006709 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
Jens Axboec8d1ba52020-09-14 11:07:26 -06006710 io_ring_clear_wakeup_flag(ctx);
6711
Jens Axboee95eee22020-09-08 09:11:32 -06006712 /* if we're handling multiple rings, cap submit size for fairness */
6713 if (cap_entries && to_submit > 8)
6714 to_submit = 8;
6715
Jens Axboec8d1ba52020-09-14 11:07:26 -06006716 mutex_lock(&ctx->uring_lock);
6717 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6718 ret = io_submit_sqes(ctx, to_submit);
6719 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06006720
6721 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6722 wake_up(&ctx->sqo_sq_wait);
6723
Jens Axboec8d1ba52020-09-14 11:07:26 -06006724 return SQT_DID_WORK;
6725}
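/*
 * The return value drives io_sq_thread() below: SQT_DID_WORK means SQEs
 * were submitted and the loop goes around immediately, SQT_SPIN means
 * keep busy-polling within the idle period, and SQT_IDLE means there was
 * nothing to do, so the caller sets IORING_SQ_NEED_WAKEUP and schedule()s
 * until userspace wakes the thread again.
 */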
6726
Jens Axboe69fb2132020-09-14 11:16:23 -06006727static void io_sqd_init_new(struct io_sq_data *sqd)
6728{
6729 struct io_ring_ctx *ctx;
6730
6731 while (!list_empty(&sqd->ctx_new_list)) {
6732 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
6733 init_wait(&ctx->sqo_wait_entry);
6734 ctx->sqo_wait_entry.func = io_sq_wake_function;
6735 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
6736 complete(&ctx->sq_thread_comp);
6737 }
6738}
6739
Jens Axboe6c271ce2019-01-10 11:22:30 -07006740static int io_sq_thread(void *data)
6741{
Dennis Zhou91d8f512020-09-16 13:41:05 -07006742 struct cgroup_subsys_state *cur_css = NULL;
Jens Axboe69fb2132020-09-14 11:16:23 -06006743 const struct cred *old_cred = NULL;
6744 struct io_sq_data *sqd = data;
6745 struct io_ring_ctx *ctx;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006746 unsigned long start_jiffies;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006747
Jens Axboec8d1ba52020-09-14 11:07:26 -06006748 start_jiffies = jiffies;
Jens Axboe69fb2132020-09-14 11:16:23 -06006749 while (!kthread_should_stop()) {
6750 enum sq_ret ret = 0;
Jens Axboee95eee22020-09-08 09:11:32 -06006751 bool cap_entries;
Jens Axboec1edbf52019-11-10 16:56:04 -07006752
6753 /*
Jens Axboe69fb2132020-09-14 11:16:23 -06006754 * Any changes to the sqd lists are synchronized through the
6755	 * kthread parking. This synchronizes the thread vs users;
6756 * the users are synchronized on the sqd->ctx_lock.
Jens Axboec1edbf52019-11-10 16:56:04 -07006757 */
Jens Axboe69fb2132020-09-14 11:16:23 -06006758 if (kthread_should_park())
6759 kthread_parkme();
6760
6761 if (unlikely(!list_empty(&sqd->ctx_new_list)))
6762 io_sqd_init_new(sqd);
6763
Jens Axboee95eee22020-09-08 09:11:32 -06006764 cap_entries = !list_is_singular(&sqd->ctx_list);
6765
Jens Axboe69fb2132020-09-14 11:16:23 -06006766 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6767 if (current->cred != ctx->creds) {
6768 if (old_cred)
6769 revert_creds(old_cred);
6770 old_cred = override_creds(ctx->creds);
6771 }
Dennis Zhou91d8f512020-09-16 13:41:05 -07006772 io_sq_thread_associate_blkcg(ctx, &cur_css);
Jens Axboe4ea33a92020-10-15 13:46:44 -06006773#ifdef CONFIG_AUDIT
6774 current->loginuid = ctx->loginuid;
6775 current->sessionid = ctx->sessionid;
6776#endif
Jens Axboe69fb2132020-09-14 11:16:23 -06006777
Jens Axboee95eee22020-09-08 09:11:32 -06006778 ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
Jens Axboe69fb2132020-09-14 11:16:23 -06006779
Jens Axboe4349f302020-07-09 15:07:01 -06006780 io_sq_thread_drop_mm();
Jens Axboe6c271ce2019-01-10 11:22:30 -07006781 }
6782
Jens Axboe69fb2132020-09-14 11:16:23 -06006783 if (ret & SQT_SPIN) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006784 io_run_task_work();
6785 cond_resched();
Jens Axboe69fb2132020-09-14 11:16:23 -06006786 } else if (ret == SQT_IDLE) {
6787 if (kthread_should_park())
6788 continue;
6789 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6790 io_ring_set_wakeup_flag(ctx);
6791 schedule();
6792 start_jiffies = jiffies;
6793 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6794 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006795 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006796 }
6797
Jens Axboe4c6e2772020-07-01 11:29:10 -06006798 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07006799
Dennis Zhou91d8f512020-09-16 13:41:05 -07006800 if (cur_css)
6801 io_sq_thread_unassociate_blkcg();
Jens Axboe69fb2132020-09-14 11:16:23 -06006802 if (old_cred)
6803 revert_creds(old_cred);
Jens Axboe06058632019-04-13 09:26:03 -06006804
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02006805 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06006806
Jens Axboe6c271ce2019-01-10 11:22:30 -07006807 return 0;
6808}
6809
Jens Axboebda52162019-09-24 13:47:15 -06006810struct io_wait_queue {
6811 struct wait_queue_entry wq;
6812 struct io_ring_ctx *ctx;
6813 unsigned to_wait;
6814 unsigned nr_timeouts;
6815};
6816
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006817static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
Jens Axboebda52162019-09-24 13:47:15 -06006818{
6819 struct io_ring_ctx *ctx = iowq->ctx;
6820
6821 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006822 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006823 * started waiting. For timeouts, we always want to return to userspace,
6824 * regardless of event count.
6825 */
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006826 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006827 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6828}
6829
6830static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6831 int wake_flags, void *key)
6832{
6833 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6834 wq);
6835
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006836 /* use noflush == true, as we can't safely rely on locking context */
6837 if (!io_should_wake(iowq, true))
Jens Axboebda52162019-09-24 13:47:15 -06006838 return -1;
6839
6840 return autoremove_wake_function(curr, mode, wake_flags, key);
6841}
6842
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006843static int io_run_task_work_sig(void)
6844{
6845 if (io_run_task_work())
6846 return 1;
6847 if (!signal_pending(current))
6848 return 0;
6849 if (current->jobctl & JOBCTL_TASK_WORK) {
6850 spin_lock_irq(&current->sighand->siglock);
6851 current->jobctl &= ~JOBCTL_TASK_WORK;
6852 recalc_sigpending();
6853 spin_unlock_irq(&current->sighand->siglock);
6854 return 1;
6855 }
6856 return -EINTR;
6857}
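/*
 * Return semantics: > 0 if task_work was run (retry the wait loop), 0 if
 * nothing is pending, -EINTR if an actual signal (not just queued
 * task_work) is pending and the wait should be aborted.
 */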
6858
Jens Axboe2b188cc2019-01-07 10:46:33 -07006859/*
6860 * Wait until events become available, if we don't already have some. The
6861 * application must reap them itself, as they reside on the shared cq ring.
6862 */
6863static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6864 const sigset_t __user *sig, size_t sigsz)
6865{
Jens Axboebda52162019-09-24 13:47:15 -06006866 struct io_wait_queue iowq = {
6867 .wq = {
6868 .private = current,
6869 .func = io_wake_function,
6870 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6871 },
6872 .ctx = ctx,
6873 .to_wait = min_events,
6874 };
Hristo Venev75b28af2019-08-26 17:23:46 +00006875 struct io_rings *rings = ctx->rings;
Jackie Liue9ffa5c2019-10-29 11:16:42 +08006876 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006877
Jens Axboeb41e9852020-02-17 09:52:41 -07006878 do {
6879 if (io_cqring_events(ctx, false) >= min_events)
6880 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06006881 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07006882 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07006883 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006884
6885 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006886#ifdef CONFIG_COMPAT
6887 if (in_compat_syscall())
6888 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07006889 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006890 else
6891#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07006892 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006893
Jens Axboe2b188cc2019-01-07 10:46:33 -07006894 if (ret)
6895 return ret;
6896 }
6897
Jens Axboebda52162019-09-24 13:47:15 -06006898 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006899 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06006900 do {
6901 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6902 TASK_INTERRUPTIBLE);
Jens Axboece593a62020-06-30 12:39:05 -06006903 /* make sure we run task_work before checking for signals */
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006904 ret = io_run_task_work_sig();
6905 if (ret > 0)
Jens Axboe4c6e2772020-07-01 11:29:10 -06006906 continue;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006907 else if (ret < 0)
Jens Axboece593a62020-06-30 12:39:05 -06006908 break;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006909 if (io_should_wake(&iowq, false))
Jens Axboebda52162019-09-24 13:47:15 -06006910 break;
6911 schedule();
Jens Axboebda52162019-09-24 13:47:15 -06006912 } while (1);
6913 finish_wait(&ctx->wait, &iowq.wq);
6914
Jens Axboeb7db41c2020-07-04 08:55:50 -06006915 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006916
Hristo Venev75b28af2019-08-26 17:23:46 +00006917 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006918}
6919
Jens Axboe6b063142019-01-10 22:13:58 -07006920static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6921{
6922#if defined(CONFIG_UNIX)
6923 if (ctx->ring_sock) {
6924 struct sock *sock = ctx->ring_sock->sk;
6925 struct sk_buff *skb;
6926
6927 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6928 kfree_skb(skb);
6929 }
6930#else
6931 int i;
6932
Jens Axboe65e19f52019-10-26 07:20:21 -06006933 for (i = 0; i < ctx->nr_user_files; i++) {
6934 struct file *file;
6935
6936 file = io_file_from_index(ctx, i);
6937 if (file)
6938 fput(file);
6939 }
Jens Axboe6b063142019-01-10 22:13:58 -07006940#endif
6941}
6942
Jens Axboe05f3fb32019-12-09 11:22:50 -07006943static void io_file_ref_kill(struct percpu_ref *ref)
6944{
6945 struct fixed_file_data *data;
6946
6947 data = container_of(ref, struct fixed_file_data, refs);
6948 complete(&data->done);
6949}
6950
Jens Axboe6b063142019-01-10 22:13:58 -07006951static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6952{
Jens Axboe05f3fb32019-12-09 11:22:50 -07006953 struct fixed_file_data *data = ctx->file_data;
Xiaoguang Wang05589552020-03-31 14:05:18 +08006954 struct fixed_file_ref_node *ref_node = NULL;
Jens Axboe65e19f52019-10-26 07:20:21 -06006955 unsigned nr_tables, i;
6956
Jens Axboe05f3fb32019-12-09 11:22:50 -07006957 if (!data)
Jens Axboe6b063142019-01-10 22:13:58 -07006958 return -ENXIO;
6959
Jens Axboe6a4d07c2020-05-15 14:30:38 -06006960 spin_lock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006961 if (!list_empty(&data->ref_list))
6962 ref_node = list_first_entry(&data->ref_list,
6963 struct fixed_file_ref_node, node);
Jens Axboe6a4d07c2020-05-15 14:30:38 -06006964 spin_unlock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006965 if (ref_node)
6966 percpu_ref_kill(&ref_node->refs);
6967
6968 percpu_ref_kill(&data->refs);
6969
6970 /* wait for all refs nodes to complete */
Jens Axboe4a38aed22020-05-14 17:21:15 -06006971 flush_delayed_work(&ctx->file_put_work);
Jens Axboe2faf8522020-02-04 19:54:55 -07006972 wait_for_completion(&data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006973
Jens Axboe6b063142019-01-10 22:13:58 -07006974 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06006975 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6976 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006977 kfree(data->table[i].files);
6978 kfree(data->table);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006979 percpu_ref_exit(&data->refs);
6980 kfree(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006981 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07006982 ctx->nr_user_files = 0;
6983 return 0;
6984}
6985
Jens Axboe534ca6d2020-09-02 13:52:19 -06006986static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006987{
Jens Axboe534ca6d2020-09-02 13:52:19 -06006988 if (refcount_dec_and_test(&sqd->refs)) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02006989 /*
6990	 * The park is a bit of a work-around; without it we get
6991 * warning spews on shutdown with SQPOLL set and affinity
6992 * set to a single CPU.
6993 */
Jens Axboe534ca6d2020-09-02 13:52:19 -06006994 if (sqd->thread) {
6995 kthread_park(sqd->thread);
6996 kthread_stop(sqd->thread);
6997 }
6998
6999 kfree(sqd);
7000 }
7001}
7002
Jens Axboeaa061652020-09-02 14:50:27 -06007003static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7004{
7005 struct io_ring_ctx *ctx_attach;
7006 struct io_sq_data *sqd;
7007 struct fd f;
7008
7009 f = fdget(p->wq_fd);
7010 if (!f.file)
7011 return ERR_PTR(-ENXIO);
7012 if (f.file->f_op != &io_uring_fops) {
7013 fdput(f);
7014 return ERR_PTR(-EINVAL);
7015 }
7016
7017 ctx_attach = f.file->private_data;
7018 sqd = ctx_attach->sq_data;
7019 if (!sqd) {
7020 fdput(f);
7021 return ERR_PTR(-EINVAL);
7022 }
7023
7024 refcount_inc(&sqd->refs);
7025 fdput(f);
7026 return sqd;
7027}
7028
Jens Axboe534ca6d2020-09-02 13:52:19 -06007029static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7030{
7031 struct io_sq_data *sqd;
7032
Jens Axboeaa061652020-09-02 14:50:27 -06007033 if (p->flags & IORING_SETUP_ATTACH_WQ)
7034 return io_attach_sq_data(p);
7035
Jens Axboe534ca6d2020-09-02 13:52:19 -06007036 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7037 if (!sqd)
7038 return ERR_PTR(-ENOMEM);
7039
7040 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007041 INIT_LIST_HEAD(&sqd->ctx_list);
7042 INIT_LIST_HEAD(&sqd->ctx_new_list);
7043 mutex_init(&sqd->ctx_lock);
7044 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007045 init_waitqueue_head(&sqd->wait);
7046 return sqd;
7047}
7048
Jens Axboe69fb2132020-09-14 11:16:23 -06007049static void io_sq_thread_unpark(struct io_sq_data *sqd)
7050 __releases(&sqd->lock)
7051{
7052 if (!sqd->thread)
7053 return;
7054 kthread_unpark(sqd->thread);
7055 mutex_unlock(&sqd->lock);
7056}
7057
7058static void io_sq_thread_park(struct io_sq_data *sqd)
7059 __acquires(&sqd->lock)
7060{
7061 if (!sqd->thread)
7062 return;
7063 mutex_lock(&sqd->lock);
7064 kthread_park(sqd->thread);
7065}
7066
Jens Axboe534ca6d2020-09-02 13:52:19 -06007067static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7068{
7069 struct io_sq_data *sqd = ctx->sq_data;
7070
7071 if (sqd) {
7072 if (sqd->thread) {
7073 /*
7074 * We may arrive here from the error branch in
7075 * io_sq_offload_create() where the kthread is created
7076	 * without being woken up, so wake it up now to make
7077 * sure the wait will complete.
7078 */
7079 wake_up_process(sqd->thread);
7080 wait_for_completion(&ctx->sq_thread_comp);
Jens Axboe69fb2132020-09-14 11:16:23 -06007081
7082 io_sq_thread_park(sqd);
7083 }
7084
7085 mutex_lock(&sqd->ctx_lock);
7086 list_del(&ctx->sqd_list);
7087 mutex_unlock(&sqd->ctx_lock);
7088
7089 if (sqd->thread) {
7090 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
7091 io_sq_thread_unpark(sqd);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007092 }
7093
7094 io_put_sq_data(sqd);
7095 ctx->sq_data = NULL;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007096 }
7097}
7098
Jens Axboe6b063142019-01-10 22:13:58 -07007099static void io_finish_async(struct io_ring_ctx *ctx)
7100{
Jens Axboe6c271ce2019-01-10 11:22:30 -07007101 io_sq_thread_stop(ctx);
7102
Jens Axboe561fb042019-10-24 07:25:42 -06007103 if (ctx->io_wq) {
7104 io_wq_destroy(ctx->io_wq);
7105 ctx->io_wq = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007106 }
7107}
7108
7109#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007110/*
7111 * Ensure the UNIX gc is aware of our file set, so we are certain that
7112 * the io_uring can be safely unregistered on process exit, even if we have
7113 * loops in the file referencing.
7114 */
7115static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7116{
7117 struct sock *sk = ctx->ring_sock->sk;
7118 struct scm_fp_list *fpl;
7119 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007120 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007121
Jens Axboe6b063142019-01-10 22:13:58 -07007122 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7123 if (!fpl)
7124 return -ENOMEM;
7125
7126 skb = alloc_skb(0, GFP_KERNEL);
7127 if (!skb) {
7128 kfree(fpl);
7129 return -ENOMEM;
7130 }
7131
7132 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007133
Jens Axboe08a45172019-10-03 08:11:03 -06007134 nr_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07007135 fpl->user = get_uid(ctx->user);
7136 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007137 struct file *file = io_file_from_index(ctx, i + offset);
7138
7139 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007140 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007141 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007142 unix_inflight(fpl->user, fpl->fp[nr_files]);
7143 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007144 }
7145
Jens Axboe08a45172019-10-03 08:11:03 -06007146 if (nr_files) {
7147 fpl->max = SCM_MAX_FD;
7148 fpl->count = nr_files;
7149 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007150 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007151 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7152 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007153
Jens Axboe08a45172019-10-03 08:11:03 -06007154 for (i = 0; i < nr_files; i++)
7155 fput(fpl->fp[i]);
7156 } else {
7157 kfree_skb(skb);
7158 kfree(fpl);
7159 }
Jens Axboe6b063142019-01-10 22:13:58 -07007160
7161 return 0;
7162}
7163
7164/*
7165 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7166 * causes regular reference counting to break down. We rely on the UNIX
7167 * garbage collection to take care of this problem for us.
7168 */
7169static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7170{
7171 unsigned left, total;
7172 int ret = 0;
7173
7174 total = 0;
7175 left = ctx->nr_user_files;
7176 while (left) {
7177 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007178
7179 ret = __io_sqe_files_scm(ctx, this_files, total);
7180 if (ret)
7181 break;
7182 left -= this_files;
7183 total += this_files;
7184 }
7185
7186 if (!ret)
7187 return 0;
7188
7189 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007190 struct file *file = io_file_from_index(ctx, total);
7191
7192 if (file)
7193 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007194 total++;
7195 }
7196
7197 return ret;
7198}
7199#else
7200static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7201{
7202 return 0;
7203}
7204#endif
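/*
 * Background note: the registered files are attached to the ring's
 * AF_UNIX socket via SCM_RIGHTS (in batches of SCM_MAX_FD), which makes
 * the UNIX garbage collector aware of them; reference cycles created
 * through the fixed file table can then still be reclaimed on exit.
 */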
7205
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007206static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
7207 unsigned nr_tables, unsigned nr_files)
Jens Axboe65e19f52019-10-26 07:20:21 -06007208{
7209 int i;
7210
7211 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007212 struct fixed_file_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007213 unsigned this_files;
7214
7215 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7216 table->files = kcalloc(this_files, sizeof(struct file *),
7217 GFP_KERNEL);
7218 if (!table->files)
7219 break;
7220 nr_files -= this_files;
7221 }
7222
7223 if (i == nr_tables)
7224 return 0;
7225
7226 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007227 struct fixed_file_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007228 kfree(table->files);
7229 }
7230 return 1;
7231}
7232
Jens Axboe05f3fb32019-12-09 11:22:50 -07007233static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
Jens Axboec3a31e62019-10-03 13:59:56 -06007234{
7235#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007236 struct sock *sock = ctx->ring_sock->sk;
7237 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7238 struct sk_buff *skb;
7239 int i;
7240
7241 __skb_queue_head_init(&list);
7242
7243 /*
7244 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7245 * remove this entry and rearrange the file array.
7246 */
7247 skb = skb_dequeue(head);
7248 while (skb) {
7249 struct scm_fp_list *fp;
7250
7251 fp = UNIXCB(skb).fp;
7252 for (i = 0; i < fp->count; i++) {
7253 int left;
7254
7255 if (fp->fp[i] != file)
7256 continue;
7257
7258 unix_notinflight(fp->user, fp->fp[i]);
7259 left = fp->count - 1 - i;
7260 if (left) {
7261 memmove(&fp->fp[i], &fp->fp[i + 1],
7262 left * sizeof(struct file *));
7263 }
7264 fp->count--;
7265 if (!fp->count) {
7266 kfree_skb(skb);
7267 skb = NULL;
7268 } else {
7269 __skb_queue_tail(&list, skb);
7270 }
7271 fput(file);
7272 file = NULL;
7273 break;
7274 }
7275
7276 if (!file)
7277 break;
7278
7279 __skb_queue_tail(&list, skb);
7280
7281 skb = skb_dequeue(head);
7282 }
7283
7284 if (skb_peek(&list)) {
7285 spin_lock_irq(&head->lock);
7286 while ((skb = __skb_dequeue(&list)) != NULL)
7287 __skb_queue_tail(head, skb);
7288 spin_unlock_irq(&head->lock);
7289 }
7290#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007291 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007292#endif
7293}
7294
Jens Axboe05f3fb32019-12-09 11:22:50 -07007295struct io_file_put {
Xiaoguang Wang05589552020-03-31 14:05:18 +08007296 struct list_head list;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007297 struct file *file;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007298};
7299
Jens Axboe4a38aed22020-05-14 17:21:15 -06007300static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007301{
Jens Axboe4a38aed22020-05-14 17:21:15 -06007302 struct fixed_file_data *file_data = ref_node->file_data;
7303 struct io_ring_ctx *ctx = file_data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007304 struct io_file_put *pfile, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007305
7306 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007307 list_del(&pfile->list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007308 io_ring_file_put(ctx, pfile->file);
7309 kfree(pfile);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007310 }
7311
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007312 spin_lock(&file_data->lock);
7313 list_del(&ref_node->node);
7314 spin_unlock(&file_data->lock);
Jens Axboe2faf8522020-02-04 19:54:55 -07007315
Xiaoguang Wang05589552020-03-31 14:05:18 +08007316 percpu_ref_exit(&ref_node->refs);
7317 kfree(ref_node);
7318 percpu_ref_put(&file_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007319}
7320
Jens Axboe4a38aed22020-05-14 17:21:15 -06007321static void io_file_put_work(struct work_struct *work)
7322{
7323 struct io_ring_ctx *ctx;
7324 struct llist_node *node;
7325
7326 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
7327 node = llist_del_all(&ctx->file_put_llist);
7328
7329 while (node) {
7330 struct fixed_file_ref_node *ref_node;
7331 struct llist_node *next = node->next;
7332
7333 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
7334 __io_file_put_work(ref_node);
7335 node = next;
7336 }
7337}
7338
Jens Axboe05f3fb32019-12-09 11:22:50 -07007339static void io_file_data_ref_zero(struct percpu_ref *ref)
7340{
Xiaoguang Wang05589552020-03-31 14:05:18 +08007341 struct fixed_file_ref_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007342 struct io_ring_ctx *ctx;
7343 bool first_add;
7344 int delay = HZ;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007345
Xiaoguang Wang05589552020-03-31 14:05:18 +08007346 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007347 ctx = ref_node->file_data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007348
Jens Axboe4a38aed22020-05-14 17:21:15 -06007349 if (percpu_ref_is_dying(&ctx->file_data->refs))
7350 delay = 0;
7351
7352 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
7353 if (!delay)
7354 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
7355 else if (first_add)
7356 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007357}
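/*
 * Put handling: when a fixed_file_ref_node's percpu ref hits zero it is
 * queued on file_put_llist and drained from a delayed work item, batching
 * the final fput()s; the delay is skipped entirely once the whole file
 * data set is being torn down.
 */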
7358
7359static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
7360 struct io_ring_ctx *ctx)
7361{
7362 struct fixed_file_ref_node *ref_node;
7363
7364 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7365 if (!ref_node)
7366 return ERR_PTR(-ENOMEM);
7367
7368 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
7369 0, GFP_KERNEL)) {
7370 kfree(ref_node);
7371 return ERR_PTR(-ENOMEM);
7372 }
7373 INIT_LIST_HEAD(&ref_node->node);
7374 INIT_LIST_HEAD(&ref_node->file_list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007375 ref_node->file_data = ctx->file_data;
7376 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007377}
7378
7379static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
7380{
7381 percpu_ref_exit(&ref_node->refs);
7382 kfree(ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007383}
7384
7385static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7386 unsigned nr_args)
7387{
7388 __s32 __user *fds = (__s32 __user *) arg;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007389 unsigned nr_tables, i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007390 struct file *file;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007391 int fd, ret = -ENOMEM;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007392 struct fixed_file_ref_node *ref_node;
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007393 struct fixed_file_data *file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007394
7395 if (ctx->file_data)
7396 return -EBUSY;
7397 if (!nr_args)
7398 return -EINVAL;
7399 if (nr_args > IORING_MAX_FIXED_FILES)
7400 return -EMFILE;
7401
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007402 file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
7403 if (!file_data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007404 return -ENOMEM;
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007405 file_data->ctx = ctx;
7406 init_completion(&file_data->done);
7407 INIT_LIST_HEAD(&file_data->ref_list);
7408 spin_lock_init(&file_data->lock);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007409
7410 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
Colin Ian King035fbaf2020-10-12 15:03:41 +01007411 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007412 GFP_KERNEL);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007413 if (!file_data->table)
7414 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007415
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007416 if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007417 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
7418 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007419
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007420 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
7421 goto out_ref;
Jens Axboe55cbc252020-10-14 07:35:57 -06007422 ctx->file_data = file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007423
7424 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
7425 struct fixed_file_table *table;
7426 unsigned index;
7427
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007428 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7429 ret = -EFAULT;
7430 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007431 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007432 /* allow sparse sets */
7433 if (fd == -1)
7434 continue;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007435
Jens Axboe05f3fb32019-12-09 11:22:50 -07007436 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007437 ret = -EBADF;
7438 if (!file)
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007439 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007440
7441 /*
7442 * Don't allow io_uring instances to be registered. If UNIX
7443 * isn't enabled, then this causes a reference cycle and this
7444 * instance can never get freed. If UNIX is enabled we'll
7445 * handle it just fine, but there's still no point in allowing
7446 * a ring fd as it doesn't support regular read/write anyway.
7447 */
7448 if (file->f_op == &io_uring_fops) {
7449 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007450 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007451 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007452 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7453 index = i & IORING_FILE_TABLE_MASK;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007454 table->files[index] = file;
7455 }
7456
Jens Axboe05f3fb32019-12-09 11:22:50 -07007457 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007458 if (ret) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07007459 io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007460 return ret;
7461 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007462
Xiaoguang Wang05589552020-03-31 14:05:18 +08007463 ref_node = alloc_fixed_file_ref_node(ctx);
7464 if (IS_ERR(ref_node)) {
7465 io_sqe_files_unregister(ctx);
7466 return PTR_ERR(ref_node);
7467 }
7468
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007469 file_data->node = ref_node;
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007470 spin_lock(&file_data->lock);
7471 list_add(&ref_node->node, &file_data->ref_list);
7472 spin_unlock(&file_data->lock);
7473 percpu_ref_get(&file_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007474 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007475out_fput:
7476 for (i = 0; i < ctx->nr_user_files; i++) {
7477 file = io_file_from_index(ctx, i);
7478 if (file)
7479 fput(file);
7480 }
7481 for (i = 0; i < nr_tables; i++)
7482 kfree(file_data->table[i].files);
7483 ctx->nr_user_files = 0;
7484out_ref:
7485 percpu_ref_exit(&file_data->refs);
7486out_free:
7487 kfree(file_data->table);
7488 kfree(file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007489 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007490 return ret;
7491}
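/*
 * Illustrative userspace sketch of the registration path above; ring_fd,
 * sock_a and sock_b are assumptions (an io_uring fd from io_uring_setup()
 * and two open descriptors). liburing's io_uring_register_files() wraps the
 * same raw syscall.
 *
 *	__s32 fds[4] = { sock_a, sock_b, -1, -1 };	(-1 leaves a sparse slot)
 *
 *	if (syscall(__NR_io_uring_register, ring_fd,
 *		    IORING_REGISTER_FILES, fds, 4) < 0)
 *		perror("IORING_REGISTER_FILES");
 */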
7492
Jens Axboec3a31e62019-10-03 13:59:56 -06007493static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7494 int index)
7495{
7496#if defined(CONFIG_UNIX)
7497 struct sock *sock = ctx->ring_sock->sk;
7498 struct sk_buff_head *head = &sock->sk_receive_queue;
7499 struct sk_buff *skb;
7500
7501 /*
7502 * See if we can merge this file into an existing skb SCM_RIGHTS
7503 * file set. If there's no room, fall back to allocating a new skb
7504 * and filling it in.
7505 */
7506 spin_lock_irq(&head->lock);
7507 skb = skb_peek(head);
7508 if (skb) {
7509 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7510
7511 if (fpl->count < SCM_MAX_FD) {
7512 __skb_unlink(skb, head);
7513 spin_unlock_irq(&head->lock);
7514 fpl->fp[fpl->count] = get_file(file);
7515 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7516 fpl->count++;
7517 spin_lock_irq(&head->lock);
7518 __skb_queue_head(head, skb);
7519 } else {
7520 skb = NULL;
7521 }
7522 }
7523 spin_unlock_irq(&head->lock);
7524
7525 if (skb) {
7526 fput(file);
7527 return 0;
7528 }
7529
7530 return __io_sqe_files_scm(ctx, 1, index);
7531#else
7532 return 0;
7533#endif
7534}
7535
Hillf Dantona5318d32020-03-23 17:47:15 +08007536static int io_queue_file_removal(struct fixed_file_data *data,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007537 struct file *file)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007538{
Hillf Dantona5318d32020-03-23 17:47:15 +08007539 struct io_file_put *pfile;
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007540 struct fixed_file_ref_node *ref_node = data->node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007541
Jens Axboe05f3fb32019-12-09 11:22:50 -07007542 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
Hillf Dantona5318d32020-03-23 17:47:15 +08007543 if (!pfile)
7544 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007545
7546 pfile->file = file;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007547 list_add(&pfile->list, &ref_node->file_list);
7548
Hillf Dantona5318d32020-03-23 17:47:15 +08007549 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007550}
7551
7552static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7553 struct io_uring_files_update *up,
7554 unsigned nr_args)
7555{
7556 struct fixed_file_data *data = ctx->file_data;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007557 struct fixed_file_ref_node *ref_node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007558 struct file *file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007559 __s32 __user *fds;
7560 int fd, i, err;
7561 __u32 done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007562 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007563
Jens Axboe05f3fb32019-12-09 11:22:50 -07007564 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06007565 return -EOVERFLOW;
7566 if (done > ctx->nr_user_files)
7567 return -EINVAL;
7568
Xiaoguang Wang05589552020-03-31 14:05:18 +08007569 ref_node = alloc_fixed_file_ref_node(ctx);
7570 if (IS_ERR(ref_node))
7571 return PTR_ERR(ref_node);
7572
Jens Axboec3a31e62019-10-03 13:59:56 -06007573 done = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007574 fds = u64_to_user_ptr(up->fds);
Jens Axboec3a31e62019-10-03 13:59:56 -06007575 while (nr_args) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007576 struct fixed_file_table *table;
7577 unsigned index;
7578
Jens Axboec3a31e62019-10-03 13:59:56 -06007579 err = 0;
7580 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7581 err = -EFAULT;
7582 break;
7583 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007584 i = array_index_nospec(up->offset, ctx->nr_user_files);
7585 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
Jens Axboe65e19f52019-10-26 07:20:21 -06007586 index = i & IORING_FILE_TABLE_MASK;
7587 if (table->files[index]) {
Jiufei Xue98dfd502020-09-01 13:35:02 +08007588 file = table->files[index];
Hillf Dantona5318d32020-03-23 17:47:15 +08007589 err = io_queue_file_removal(data, file);
7590 if (err)
7591 break;
Jens Axboe65e19f52019-10-26 07:20:21 -06007592 table->files[index] = NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007593 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007594 }
7595 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007596 file = fget(fd);
7597 if (!file) {
7598 err = -EBADF;
7599 break;
7600 }
7601 /*
7602 * Don't allow io_uring instances to be registered. If
7603 * UNIX isn't enabled, then this causes a reference
7604 * cycle and this instance can never get freed. If UNIX
7605 * is enabled we'll handle it just fine, but there's
7606 * still no point in allowing a ring fd as it doesn't
7607 * support regular read/write anyway.
7608 */
7609 if (file->f_op == &io_uring_fops) {
7610 fput(file);
7611 err = -EBADF;
7612 break;
7613 }
Jens Axboe65e19f52019-10-26 07:20:21 -06007614 table->files[index] = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007615 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007616 if (err) {
Jiufei Xue95d1c8e2020-09-02 17:59:39 +08007617 table->files[index] = NULL;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007618 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007619 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007620 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007621 }
7622 nr_args--;
7623 done++;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007624 up->offset++;
7625 }
7626
Xiaoguang Wang05589552020-03-31 14:05:18 +08007627 if (needs_switch) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007628 percpu_ref_kill(&data->node->refs);
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007629 spin_lock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007630 list_add(&ref_node->node, &data->ref_list);
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007631 data->node = ref_node;
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007632 spin_unlock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007633 percpu_ref_get(&ctx->file_data->refs);
7634 } else
7635 destroy_fixed_file_ref_node(ref_node);
Jens Axboec3a31e62019-10-03 13:59:56 -06007636
7637 return done ? done : err;
7638}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007639
Jens Axboe05f3fb32019-12-09 11:22:50 -07007640static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7641 unsigned nr_args)
7642{
7643 struct io_uring_files_update up;
7644
7645 if (!ctx->file_data)
7646 return -ENXIO;
7647 if (!nr_args)
7648 return -EINVAL;
7649 if (copy_from_user(&up, arg, sizeof(up)))
7650 return -EFAULT;
7651 if (up.resv)
7652 return -EINVAL;
7653
7654 return __io_sqe_files_update(ctx, &up, nr_args);
7655}
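/*
 * Illustrative userspace sketch of the update path above; slot 2 and new_fd
 * are assumptions for the example. Passing an fd of -1 in the update array
 * simply clears the slot, mirroring the sparse-set handling in the loop.
 *
 *	struct io_uring_files_update up = {
 *		.offset	= 2,
 *		.fds	= (__u64)(uintptr_t)&new_fd,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE, &up, 1);
 */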
Jens Axboec3a31e62019-10-03 13:59:56 -06007656
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007657static void io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007658{
7659 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7660
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007661 /* Consider that io_steal_work() relies on this ref */
Jens Axboe7d723062019-11-12 22:31:31 -07007662 io_put_req(req);
7663}
7664
Pavel Begunkov24369c22020-01-28 03:15:48 +03007665static int io_init_wq_offload(struct io_ring_ctx *ctx,
7666 struct io_uring_params *p)
7667{
7668 struct io_wq_data data;
7669 struct fd f;
7670 struct io_ring_ctx *ctx_attach;
7671 unsigned int concurrency;
7672 int ret = 0;
7673
7674 data.user = ctx->user;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007675 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007676 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007677
7678 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
7679		/* Do QD, or 4 * CPUS, whichever is smaller */
7680 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7681
7682 ctx->io_wq = io_wq_create(concurrency, &data);
7683 if (IS_ERR(ctx->io_wq)) {
7684 ret = PTR_ERR(ctx->io_wq);
7685 ctx->io_wq = NULL;
7686 }
7687 return ret;
7688 }
7689
7690 f = fdget(p->wq_fd);
7691 if (!f.file)
7692 return -EBADF;
7693
7694 if (f.file->f_op != &io_uring_fops) {
7695 ret = -EINVAL;
7696 goto out_fput;
7697 }
7698
7699 ctx_attach = f.file->private_data;
7700 /* @io_wq is protected by holding the fd */
7701 if (!io_wq_get(ctx_attach->io_wq, &data)) {
7702 ret = -EINVAL;
7703 goto out_fput;
7704 }
7705
7706 ctx->io_wq = ctx_attach->io_wq;
7707out_fput:
7708 fdput(f);
7709 return ret;
7710}
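/*
 * Illustrative userspace sketch of IORING_SETUP_ATTACH_WQ, the path that
 * io_init_wq_offload() services through fdget(p->wq_fd) above; the new ring
 * then shares the io-wq of 'existing_ring_fd' (an assumption here, and it
 * must itself be an io_uring fd).
 *
 *	struct io_uring_params p = {
 *		.flags	= IORING_SETUP_ATTACH_WQ,
 *		.wq_fd	= existing_ring_fd,
 *	};
 *
 *	int fd = syscall(__NR_io_uring_setup, 64, &p);
 */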
7711
Jens Axboe0f212202020-09-13 13:09:39 -06007712static int io_uring_alloc_task_context(struct task_struct *task)
7713{
7714 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007715 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007716
7717 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7718 if (unlikely(!tctx))
7719 return -ENOMEM;
7720
Jens Axboed8a6df12020-10-15 16:24:45 -06007721 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7722 if (unlikely(ret)) {
7723 kfree(tctx);
7724 return ret;
7725 }
7726
Jens Axboe0f212202020-09-13 13:09:39 -06007727 xa_init(&tctx->xa);
7728 init_waitqueue_head(&tctx->wait);
7729 tctx->last = NULL;
7730 tctx->in_idle = 0;
Jens Axboe500a3732020-10-15 17:38:03 -06007731 io_init_identity(&tctx->__identity);
7732 tctx->identity = &tctx->__identity;
Jens Axboe0f212202020-09-13 13:09:39 -06007733 task->io_uring = tctx;
7734 return 0;
7735}
7736
7737void __io_uring_free(struct task_struct *tsk)
7738{
7739 struct io_uring_task *tctx = tsk->io_uring;
7740
7741 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Jens Axboe500a3732020-10-15 17:38:03 -06007742 WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
7743 if (tctx->identity != &tctx->__identity)
7744 kfree(tctx->identity);
Jens Axboed8a6df12020-10-15 16:24:45 -06007745 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06007746 kfree(tctx);
7747 tsk->io_uring = NULL;
7748}
7749
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007750static int io_sq_offload_create(struct io_ring_ctx *ctx,
7751 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007752{
7753 int ret;
7754
Jens Axboe6c271ce2019-01-10 11:22:30 -07007755 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe534ca6d2020-09-02 13:52:19 -06007756 struct io_sq_data *sqd;
7757
Jens Axboe3ec482d2019-04-08 10:51:01 -06007758 ret = -EPERM;
7759 if (!capable(CAP_SYS_ADMIN))
7760 goto err;
7761
Jens Axboe534ca6d2020-09-02 13:52:19 -06007762 sqd = io_get_sq_data(p);
7763 if (IS_ERR(sqd)) {
7764 ret = PTR_ERR(sqd);
7765 goto err;
7766 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007767
Jens Axboe534ca6d2020-09-02 13:52:19 -06007768 ctx->sq_data = sqd;
Jens Axboe69fb2132020-09-14 11:16:23 -06007769 io_sq_thread_park(sqd);
7770 mutex_lock(&sqd->ctx_lock);
7771 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
7772 mutex_unlock(&sqd->ctx_lock);
7773 io_sq_thread_unpark(sqd);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007774
Jens Axboe917257d2019-04-13 09:28:55 -06007775 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7776 if (!ctx->sq_thread_idle)
7777 ctx->sq_thread_idle = HZ;
7778
Jens Axboeaa061652020-09-02 14:50:27 -06007779 if (sqd->thread)
7780 goto done;
7781
Jens Axboe6c271ce2019-01-10 11:22:30 -07007782 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06007783 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007784
Jens Axboe917257d2019-04-13 09:28:55 -06007785 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06007786 if (cpu >= nr_cpu_ids)
7787 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08007788 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06007789 goto err;
7790
Jens Axboe69fb2132020-09-14 11:16:23 -06007791 sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
Jens Axboe534ca6d2020-09-02 13:52:19 -06007792 cpu, "io_uring-sq");
Jens Axboe6c271ce2019-01-10 11:22:30 -07007793 } else {
Jens Axboe69fb2132020-09-14 11:16:23 -06007794 sqd->thread = kthread_create(io_sq_thread, sqd,
Jens Axboe6c271ce2019-01-10 11:22:30 -07007795 "io_uring-sq");
7796 }
Jens Axboe534ca6d2020-09-02 13:52:19 -06007797 if (IS_ERR(sqd->thread)) {
7798 ret = PTR_ERR(sqd->thread);
7799 sqd->thread = NULL;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007800 goto err;
7801 }
Jens Axboe534ca6d2020-09-02 13:52:19 -06007802 ret = io_uring_alloc_task_context(sqd->thread);
Jens Axboe0f212202020-09-13 13:09:39 -06007803 if (ret)
7804 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007805 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7806 /* Can't have SQ_AFF without SQPOLL */
7807 ret = -EINVAL;
7808 goto err;
7809 }
7810
Jens Axboeaa061652020-09-02 14:50:27 -06007811done:
Pavel Begunkov24369c22020-01-28 03:15:48 +03007812 ret = io_init_wq_offload(ctx, p);
7813 if (ret)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007814 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007815
7816 return 0;
7817err:
Jens Axboe54a91f32019-09-10 09:15:04 -06007818 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007819 return ret;
7820}
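/*
 * Illustrative userspace sketch of the SQPOLL setup handled above; the CPU
 * number and idle time are assumptions. Note the CAP_SYS_ADMIN check above:
 * unprivileged SQPOLL is not available here.
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF,
 *		.sq_thread_cpu	= 3,
 *		.sq_thread_idle	= 2000,		(milliseconds before the thread idles)
 *	};
 *
 *	int fd = syscall(__NR_io_uring_setup, 128, &p);
 */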
7821
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007822static void io_sq_offload_start(struct io_ring_ctx *ctx)
7823{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007824 struct io_sq_data *sqd = ctx->sq_data;
7825
7826 if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
7827 wake_up_process(sqd->thread);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007828}
7829
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007830static inline void __io_unaccount_mem(struct user_struct *user,
7831 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007832{
7833 atomic_long_sub(nr_pages, &user->locked_vm);
7834}
7835
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007836static inline int __io_account_mem(struct user_struct *user,
7837 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007838{
7839 unsigned long page_limit, cur_pages, new_pages;
7840
7841 /* Don't allow more pages than we can safely lock */
7842 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7843
7844 do {
7845 cur_pages = atomic_long_read(&user->locked_vm);
7846 new_pages = cur_pages + nr_pages;
7847 if (new_pages > page_limit)
7848 return -ENOMEM;
7849 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7850 new_pages) != cur_pages);
7851
7852 return 0;
7853}
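/*
 * The limit checked above is RLIMIT_MEMLOCK, so a rough userspace sketch of
 * avoiding -ENOMEM from ring/buffer accounting (privileges permitting) is
 * simply to raise that limit before setup and registration:
 *
 *	struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *	setrlimit(RLIMIT_MEMLOCK, &rl);
 */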
7854
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007855static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7856 enum io_mem_account acct)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007857{
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07007858 if (ctx->limit_mem)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007859 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007860
Jens Axboe2aede0e2020-09-14 10:45:53 -06007861 if (ctx->mm_account) {
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007862 if (acct == ACCT_LOCKED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06007863 ctx->mm_account->locked_vm -= nr_pages;
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007864 else if (acct == ACCT_PINNED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06007865 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007866 }
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007867}
7868
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007869static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7870 enum io_mem_account acct)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007871{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007872 int ret;
7873
7874 if (ctx->limit_mem) {
7875 ret = __io_account_mem(ctx->user, nr_pages);
7876 if (ret)
7877 return ret;
7878 }
7879
Jens Axboe2aede0e2020-09-14 10:45:53 -06007880 if (ctx->mm_account) {
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007881 if (acct == ACCT_LOCKED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06007882 ctx->mm_account->locked_vm += nr_pages;
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007883 else if (acct == ACCT_PINNED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06007884 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07007885 }
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007886
7887 return 0;
7888}
7889
Jens Axboe2b188cc2019-01-07 10:46:33 -07007890static void io_mem_free(void *ptr)
7891{
Mark Rutland52e04ef2019-04-30 17:30:21 +01007892 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007893
Mark Rutland52e04ef2019-04-30 17:30:21 +01007894 if (!ptr)
7895 return;
7896
7897 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007898 if (put_page_testzero(page))
7899 free_compound_page(page);
7900}
7901
7902static void *io_mem_alloc(size_t size)
7903{
7904 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
7905 __GFP_NORETRY;
7906
7907 return (void *) __get_free_pages(gfp_flags, get_order(size));
7908}
7909
Hristo Venev75b28af2019-08-26 17:23:46 +00007910static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
7911 size_t *sq_offset)
7912{
7913 struct io_rings *rings;
7914 size_t off, sq_array_size;
7915
7916 off = struct_size(rings, cqes, cq_entries);
7917 if (off == SIZE_MAX)
7918 return SIZE_MAX;
7919
7920#ifdef CONFIG_SMP
7921 off = ALIGN(off, SMP_CACHE_BYTES);
7922 if (off == 0)
7923 return SIZE_MAX;
7924#endif
7925
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02007926 if (sq_offset)
7927 *sq_offset = off;
7928
Hristo Venev75b28af2019-08-26 17:23:46 +00007929 sq_array_size = array_size(sizeof(u32), sq_entries);
7930 if (sq_array_size == SIZE_MAX)
7931 return SIZE_MAX;
7932
7933 if (check_add_overflow(off, sq_array_size, &off))
7934 return SIZE_MAX;
7935
Hristo Venev75b28af2019-08-26 17:23:46 +00007936 return off;
7937}
7938
Jens Axboe2b188cc2019-01-07 10:46:33 -07007939static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
7940{
Hristo Venev75b28af2019-08-26 17:23:46 +00007941 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007942
Hristo Venev75b28af2019-08-26 17:23:46 +00007943 pages = (size_t)1 << get_order(
7944 rings_size(sq_entries, cq_entries, NULL));
7945 pages += (size_t)1 << get_order(
7946 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07007947
Hristo Venev75b28af2019-08-26 17:23:46 +00007948 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007949}
7950
Jens Axboeedafcce2019-01-09 09:16:05 -07007951static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
7952{
7953 int i, j;
7954
7955 if (!ctx->user_bufs)
7956 return -ENXIO;
7957
7958 for (i = 0; i < ctx->nr_user_bufs; i++) {
7959 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7960
7961 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08007962 unpin_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07007963
Jens Axboede293932020-09-17 16:19:16 -06007964 if (imu->acct_pages)
7965 io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01007966 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07007967 imu->nr_bvecs = 0;
7968 }
7969
7970 kfree(ctx->user_bufs);
7971 ctx->user_bufs = NULL;
7972 ctx->nr_user_bufs = 0;
7973 return 0;
7974}
7975
7976static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
7977 void __user *arg, unsigned index)
7978{
7979 struct iovec __user *src;
7980
7981#ifdef CONFIG_COMPAT
7982 if (ctx->compat) {
7983 struct compat_iovec __user *ciovs;
7984 struct compat_iovec ciov;
7985
7986 ciovs = (struct compat_iovec __user *) arg;
7987 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
7988 return -EFAULT;
7989
Jens Axboed55e5f52019-12-11 16:12:15 -07007990 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07007991 dst->iov_len = ciov.iov_len;
7992 return 0;
7993 }
7994#endif
7995 src = (struct iovec __user *) arg;
7996 if (copy_from_user(dst, &src[index], sizeof(*dst)))
7997 return -EFAULT;
7998 return 0;
7999}
8000
Jens Axboede293932020-09-17 16:19:16 -06008001/*
8002 * Not super efficient, but this only happens at registration time. And we do cache
8003 * the last compound head, so generally we'll only do a full search if we don't
8004 * match that one.
8005 *
8006 * We check if the given compound head page has already been accounted, to
8007 * avoid double accounting it. This allows us to account the full size of the
8008 * page, not just the constituent pages of a huge page.
8009 */
8010static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8011 int nr_pages, struct page *hpage)
8012{
8013 int i, j;
8014
8015 /* check current page array */
8016 for (i = 0; i < nr_pages; i++) {
8017 if (!PageCompound(pages[i]))
8018 continue;
8019 if (compound_head(pages[i]) == hpage)
8020 return true;
8021 }
8022
8023 /* check previously registered pages */
8024 for (i = 0; i < ctx->nr_user_bufs; i++) {
8025 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8026
8027 for (j = 0; j < imu->nr_bvecs; j++) {
8028 if (!PageCompound(imu->bvec[j].bv_page))
8029 continue;
8030 if (compound_head(imu->bvec[j].bv_page) == hpage)
8031 return true;
8032 }
8033 }
8034
8035 return false;
8036}
8037
8038static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8039 int nr_pages, struct io_mapped_ubuf *imu,
8040 struct page **last_hpage)
8041{
8042 int i, ret;
8043
8044 for (i = 0; i < nr_pages; i++) {
8045 if (!PageCompound(pages[i])) {
8046 imu->acct_pages++;
8047 } else {
8048 struct page *hpage;
8049
8050 hpage = compound_head(pages[i]);
8051 if (hpage == *last_hpage)
8052 continue;
8053 *last_hpage = hpage;
8054 if (headpage_already_acct(ctx, pages, i, hpage))
8055 continue;
8056 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8057 }
8058 }
8059
8060 if (!imu->acct_pages)
8061 return 0;
8062
8063 ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
8064 if (ret)
8065 imu->acct_pages = 0;
8066 return ret;
8067}
8068
Jens Axboeedafcce2019-01-09 09:16:05 -07008069static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
8070 unsigned nr_args)
8071{
8072 struct vm_area_struct **vmas = NULL;
8073 struct page **pages = NULL;
Jens Axboede293932020-09-17 16:19:16 -06008074 struct page *last_hpage = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008075 int i, j, got_pages = 0;
8076 int ret = -EINVAL;
8077
8078 if (ctx->user_bufs)
8079 return -EBUSY;
8080 if (!nr_args || nr_args > UIO_MAXIOV)
8081 return -EINVAL;
8082
8083 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8084 GFP_KERNEL);
8085 if (!ctx->user_bufs)
8086 return -ENOMEM;
8087
8088 for (i = 0; i < nr_args; i++) {
8089 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8090 unsigned long off, start, end, ubuf;
8091 int pret, nr_pages;
8092 struct iovec iov;
8093 size_t size;
8094
8095 ret = io_copy_iov(ctx, &iov, arg, i);
8096 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03008097 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07008098
8099 /*
8100 * Don't impose further limits on the size and buffer
8101		 * constraints here; we'll return -EINVAL later when IO is
8102 * submitted if they are wrong.
8103 */
8104 ret = -EFAULT;
8105 if (!iov.iov_base || !iov.iov_len)
8106 goto err;
8107
8108 /* arbitrary limit, but we need something */
8109 if (iov.iov_len > SZ_1G)
8110 goto err;
8111
8112 ubuf = (unsigned long) iov.iov_base;
8113 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8114 start = ubuf >> PAGE_SHIFT;
8115 nr_pages = end - start;
8116
Jens Axboeedafcce2019-01-09 09:16:05 -07008117 ret = 0;
8118 if (!pages || nr_pages > got_pages) {
Denis Efremova8c73c12020-06-05 12:32:03 +03008119 kvfree(vmas);
8120 kvfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008121 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07008122 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008123 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07008124 sizeof(struct vm_area_struct *),
8125 GFP_KERNEL);
8126 if (!pages || !vmas) {
8127 ret = -ENOMEM;
Jens Axboeedafcce2019-01-09 09:16:05 -07008128 goto err;
8129 }
8130 got_pages = nr_pages;
8131 }
8132
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008133 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07008134 GFP_KERNEL);
8135 ret = -ENOMEM;
Jens Axboede293932020-09-17 16:19:16 -06008136 if (!imu->bvec)
Jens Axboeedafcce2019-01-09 09:16:05 -07008137 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07008138
8139 ret = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07008140 mmap_read_lock(current->mm);
John Hubbard2113b052020-01-30 22:13:13 -08008141 pret = pin_user_pages(ubuf, nr_pages,
Ira Weiny932f4a62019-05-13 17:17:03 -07008142 FOLL_WRITE | FOLL_LONGTERM,
8143 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07008144 if (pret == nr_pages) {
8145 /* don't support file backed memory */
8146 for (j = 0; j < nr_pages; j++) {
8147 struct vm_area_struct *vma = vmas[j];
8148
8149 if (vma->vm_file &&
8150 !is_file_hugepages(vma->vm_file)) {
8151 ret = -EOPNOTSUPP;
8152 break;
8153 }
8154 }
8155 } else {
8156 ret = pret < 0 ? pret : -EFAULT;
8157 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07008158 mmap_read_unlock(current->mm);
Jens Axboeedafcce2019-01-09 09:16:05 -07008159 if (ret) {
8160 /*
8161			 * if we did a partial map, or found file backed vmas,
8162 * release any pages we did get
8163 */
John Hubbard27c4d3a2019-08-04 19:32:06 -07008164 if (pret > 0)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08008165 unpin_user_pages(pages, pret);
Jens Axboede293932020-09-17 16:19:16 -06008166 kvfree(imu->bvec);
8167 goto err;
8168 }
8169
8170 ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
8171 if (ret) {
8172 unpin_user_pages(pages, pret);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008173 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07008174 goto err;
8175 }
8176
8177 off = ubuf & ~PAGE_MASK;
8178 size = iov.iov_len;
8179 for (j = 0; j < nr_pages; j++) {
8180 size_t vec_len;
8181
8182 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8183 imu->bvec[j].bv_page = pages[j];
8184 imu->bvec[j].bv_len = vec_len;
8185 imu->bvec[j].bv_offset = off;
8186 off = 0;
8187 size -= vec_len;
8188 }
8189 /* store original address for later verification */
8190 imu->ubuf = ubuf;
8191 imu->len = iov.iov_len;
8192 imu->nr_bvecs = nr_pages;
8193
8194 ctx->nr_user_bufs++;
8195 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008196 kvfree(pages);
8197 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07008198 return 0;
8199err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008200 kvfree(pages);
8201 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07008202 io_sqe_buffer_unregister(ctx);
8203 return ret;
8204}
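/*
 * Illustrative userspace sketch of the buffer registration implemented
 * above; the 1MB size and page alignment are assumptions. A registered
 * buffer is later referenced by its index via IORING_OP_READ_FIXED and
 * IORING_OP_WRITE_FIXED.
 *
 *	void *buf;
 *	struct iovec iov;
 *
 *	posix_memalign(&buf, 4096, 1 << 20);
 *	iov.iov_base = buf;
 *	iov.iov_len  = 1 << 20;
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS, &iov, 1);
 */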
8205
Jens Axboe9b402842019-04-11 11:45:41 -06008206static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8207{
8208 __s32 __user *fds = arg;
8209 int fd;
8210
8211 if (ctx->cq_ev_fd)
8212 return -EBUSY;
8213
8214 if (copy_from_user(&fd, fds, sizeof(*fds)))
8215 return -EFAULT;
8216
8217 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8218 if (IS_ERR(ctx->cq_ev_fd)) {
8219 int ret = PTR_ERR(ctx->cq_ev_fd);
8220 ctx->cq_ev_fd = NULL;
8221 return ret;
8222 }
8223
8224 return 0;
8225}
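/*
 * Illustrative userspace sketch of wiring up the eventfd consumed above, so
 * completions can be waited for with poll/epoll on 'efd' (the variable
 * names are assumptions):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_EVENTFD, &efd, 1);
 */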
8226
8227static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8228{
8229 if (ctx->cq_ev_fd) {
8230 eventfd_ctx_put(ctx->cq_ev_fd);
8231 ctx->cq_ev_fd = NULL;
8232 return 0;
8233 }
8234
8235 return -ENXIO;
8236}
8237
Jens Axboe5a2e7452020-02-23 16:23:11 -07008238static int __io_destroy_buffers(int id, void *p, void *data)
8239{
8240 struct io_ring_ctx *ctx = data;
8241 struct io_buffer *buf = p;
8242
Jens Axboe067524e2020-03-02 16:32:28 -07008243 __io_remove_buffers(ctx, buf, id, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008244 return 0;
8245}
8246
8247static void io_destroy_buffers(struct io_ring_ctx *ctx)
8248{
8249 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8250 idr_destroy(&ctx->io_buffer_idr);
8251}
8252
Jens Axboe2b188cc2019-01-07 10:46:33 -07008253static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8254{
Jens Axboe6b063142019-01-10 22:13:58 -07008255 io_finish_async(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07008256 io_sqe_buffer_unregister(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008257
8258 if (ctx->sqo_task) {
8259 put_task_struct(ctx->sqo_task);
8260 ctx->sqo_task = NULL;
8261 mmdrop(ctx->mm_account);
8262 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008263 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008264
Dennis Zhou91d8f512020-09-16 13:41:05 -07008265#ifdef CONFIG_BLK_CGROUP
8266 if (ctx->sqo_blkcg_css)
8267 css_put(ctx->sqo_blkcg_css);
8268#endif
8269
Jens Axboe6b063142019-01-10 22:13:58 -07008270 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06008271 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008272 io_destroy_buffers(ctx);
Jens Axboe41726c92020-02-23 13:11:42 -07008273 idr_destroy(&ctx->personality_idr);
Jens Axboedef596e2019-01-09 08:59:42 -07008274
Jens Axboe2b188cc2019-01-07 10:46:33 -07008275#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008276 if (ctx->ring_sock) {
8277 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008278 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008279 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008280#endif
8281
Hristo Venev75b28af2019-08-26 17:23:46 +00008282 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008283 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008284
8285 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008286 free_uid(ctx->user);
Jens Axboe181e4482019-11-25 08:52:30 -07008287 put_cred(ctx->creds);
Jens Axboe78076bb2019-12-04 19:56:40 -07008288 kfree(ctx->cancel_hash);
Jens Axboe0ddf92e2019-11-08 08:52:53 -07008289 kmem_cache_free(req_cachep, ctx->fallback_req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008290 kfree(ctx);
8291}
8292
8293static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8294{
8295 struct io_ring_ctx *ctx = file->private_data;
8296 __poll_t mask = 0;
8297
8298 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008299 /*
8300 * synchronizes with barrier from wq_has_sleeper call in
8301 * io_commit_cqring
8302 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008303 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008304 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008305 mask |= EPOLLOUT | EPOLLWRNORM;
Stefano Garzarella63e5d812020-02-07 13:18:28 +01008306 if (io_cqring_events(ctx, false))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008307 mask |= EPOLLIN | EPOLLRDNORM;
8308
8309 return mask;
8310}
8311
8312static int io_uring_fasync(int fd, struct file *file, int on)
8313{
8314 struct io_ring_ctx *ctx = file->private_data;
8315
8316 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8317}
8318
Jens Axboe071698e2020-01-28 10:04:42 -07008319static int io_remove_personalities(int id, void *p, void *data)
8320{
8321 struct io_ring_ctx *ctx = data;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008322 struct io_identity *iod;
Jens Axboe071698e2020-01-28 10:04:42 -07008323
Jens Axboe1e6fa522020-10-15 08:46:24 -06008324 iod = idr_remove(&ctx->personality_idr, id);
8325 if (iod) {
8326 put_cred(iod->creds);
8327 if (refcount_dec_and_test(&iod->count))
8328 kfree(iod);
8329 }
Jens Axboe071698e2020-01-28 10:04:42 -07008330 return 0;
8331}
8332
Jens Axboe85faa7b2020-04-09 18:14:00 -06008333static void io_ring_exit_work(struct work_struct *work)
8334{
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008335 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8336 exit_work);
Jens Axboe85faa7b2020-04-09 18:14:00 -06008337
Jens Axboe56952e92020-06-17 15:00:04 -06008338 /*
8339 * If we're doing polled IO and end up having requests being
8340 * submitted async (out-of-line), then completions can come in while
8341 * we're waiting for refs to drop. We need to reap these manually,
8342 * as nobody else will be looking for them.
8343 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008344 do {
Jens Axboe56952e92020-06-17 15:00:04 -06008345 if (ctx->rings)
Jens Axboee6c8aa92020-09-28 13:10:13 -06008346 io_cqring_overflow_flush(ctx, true, NULL, NULL);
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008347 io_iopoll_try_reap_events(ctx);
8348 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Jens Axboe85faa7b2020-04-09 18:14:00 -06008349 io_ring_ctx_free(ctx);
8350}
8351
Jens Axboe2b188cc2019-01-07 10:46:33 -07008352static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8353{
8354 mutex_lock(&ctx->uring_lock);
8355 percpu_ref_kill(&ctx->refs);
8356 mutex_unlock(&ctx->uring_lock);
8357
Jens Axboef3606e32020-09-22 08:18:24 -06008358 io_kill_timeouts(ctx, NULL);
8359 io_poll_remove_all(ctx, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008360
8361 if (ctx->io_wq)
8362 io_wq_cancel_all(ctx->io_wq);
8363
Jens Axboe15dff282019-11-13 09:09:23 -07008364 /* if we failed setting up the ctx, we might not have any rings */
8365 if (ctx->rings)
Jens Axboee6c8aa92020-09-28 13:10:13 -06008366 io_cqring_overflow_flush(ctx, true, NULL, NULL);
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008367 io_iopoll_try_reap_events(ctx);
Jens Axboe071698e2020-01-28 10:04:42 -07008368 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008369
8370 /*
8371 * Do this upfront, so we won't have a grace period where the ring
8372 * is closed but resources aren't reaped yet. This can cause
8373 * spurious failure in setting up a new ring.
8374 */
Jens Axboe760618f2020-07-24 12:53:31 -06008375 io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
8376 ACCT_LOCKED);
Jens Axboe309fc032020-07-10 09:13:34 -06008377
Jens Axboe85faa7b2020-04-09 18:14:00 -06008378 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008379 /*
8380 * Use system_unbound_wq to avoid spawning tons of event kworkers
8381 * if we're exiting a ton of rings at the same time. It just adds
8382	 * noise and overhead; there's no discernible change in runtime
8383 * over using system_wq.
8384 */
8385 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008386}
8387
8388static int io_uring_release(struct inode *inode, struct file *file)
8389{
8390 struct io_ring_ctx *ctx = file->private_data;
8391
8392 file->private_data = NULL;
8393 io_ring_ctx_wait_and_kill(ctx);
8394 return 0;
8395}
8396
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008397static bool io_wq_files_match(struct io_wq_work *work, void *data)
8398{
8399 struct files_struct *files = data;
8400
Jens Axboedfead8a2020-10-14 10:12:37 -06008401 return !files || ((work->flags & IO_WQ_WORK_FILES) &&
Jens Axboe98447d62020-10-14 10:48:51 -06008402 work->identity->files == files);
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008403}
8404
Jens Axboef254ac02020-08-12 17:33:30 -06008405/*
8406 * Returns true if 'preq' is the link parent of 'req'
8407 */
8408static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
8409{
8410 struct io_kiocb *link;
8411
8412 if (!(preq->flags & REQ_F_LINK_HEAD))
8413 return false;
8414
8415 list_for_each_entry(link, &preq->link_list, link_list) {
8416 if (link == req)
8417 return true;
8418 }
8419
8420 return false;
8421}
8422
Pavel Begunkovc127a2a2020-09-06 00:45:15 +03008423static bool io_match_link_files(struct io_kiocb *req,
8424 struct files_struct *files)
8425{
8426 struct io_kiocb *link;
8427
8428 if (io_match_files(req, files))
8429 return true;
8430 if (req->flags & REQ_F_LINK_HEAD) {
8431 list_for_each_entry(link, &req->link_list, link_list) {
8432 if (io_match_files(link, files))
8433 return true;
8434 }
8435 }
8436 return false;
8437}
8438
Jens Axboef254ac02020-08-12 17:33:30 -06008439/*
8440 * We're looking to cancel 'req' because it's holding on to our files, but
8441 * 'req' could be a link to another request. See if it is, and cancel that
8442 * parent request if so.
8443 */
8444static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
8445{
8446 struct hlist_node *tmp;
8447 struct io_kiocb *preq;
8448 bool found = false;
8449 int i;
8450
8451 spin_lock_irq(&ctx->completion_lock);
8452 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
8453 struct hlist_head *list;
8454
8455 list = &ctx->cancel_hash[i];
8456 hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
8457 found = io_match_link(preq, req);
8458 if (found) {
8459 io_poll_remove_one(preq);
8460 break;
8461 }
8462 }
8463 }
8464 spin_unlock_irq(&ctx->completion_lock);
8465 return found;
8466}
8467
8468static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
8469 struct io_kiocb *req)
8470{
8471 struct io_kiocb *preq;
8472 bool found = false;
8473
8474 spin_lock_irq(&ctx->completion_lock);
8475 list_for_each_entry(preq, &ctx->timeout_list, timeout.list) {
8476 found = io_match_link(preq, req);
8477 if (found) {
8478 __io_timeout_cancel(preq);
8479 break;
8480 }
8481 }
8482 spin_unlock_irq(&ctx->completion_lock);
8483 return found;
8484}
8485
Jens Axboeb711d4e2020-08-16 08:23:05 -07008486static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
8487{
8488 return io_match_link(container_of(work, struct io_kiocb, work), data);
8489}
8490
8491static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
8492{
8493 enum io_wq_cancel cret;
8494
8495 /* cancel this particular work, if it's running */
8496 cret = io_wq_cancel_work(ctx->io_wq, &req->work);
8497 if (cret != IO_WQ_CANCEL_NOTFOUND)
8498 return;
8499
8500 /* find links that hold this pending, cancel those */
8501 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
8502 if (cret != IO_WQ_CANCEL_NOTFOUND)
8503 return;
8504
8505 /* if we have a poll link holding this pending, cancel that */
8506 if (io_poll_remove_link(ctx, req))
8507 return;
8508
8509 /* final option, timeout link is holding this req pending */
8510 io_timeout_remove_link(ctx, req);
8511}
8512
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008513static void io_cancel_defer_files(struct io_ring_ctx *ctx,
8514 struct files_struct *files)
8515{
8516 struct io_defer_entry *de = NULL;
8517 LIST_HEAD(list);
8518
8519 spin_lock_irq(&ctx->completion_lock);
8520 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkovc127a2a2020-09-06 00:45:15 +03008521 if (io_match_link_files(de->req, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008522 list_cut_position(&list, &ctx->defer_list, &de->list);
8523 break;
8524 }
8525 }
8526 spin_unlock_irq(&ctx->completion_lock);
8527
8528 while (!list_empty(&list)) {
8529 de = list_first_entry(&list, struct io_defer_entry, list);
8530 list_del_init(&de->list);
8531 req_set_fail_links(de->req);
8532 io_put_req(de->req);
8533 io_req_complete(de->req, -ECANCELED);
8534 kfree(de);
8535 }
8536}
8537
Jens Axboe76e1b642020-09-26 15:05:03 -06008538/*
8539 * Returns true if we found and killed one or more files pinning requests
8540 */
8541static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
Jens Axboefcb323c2019-10-24 12:39:47 -06008542 struct files_struct *files)
8543{
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008544 if (list_empty_careful(&ctx->inflight_list))
Jens Axboe76e1b642020-09-26 15:05:03 -06008545 return false;
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008546
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008547 io_cancel_defer_files(ctx, files);
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008548	/* cancel all at once, should be faster than doing it one by one */
8549 io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
8550
Jens Axboefcb323c2019-10-24 12:39:47 -06008551 while (!list_empty_careful(&ctx->inflight_list)) {
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008552 struct io_kiocb *cancel_req = NULL, *req;
8553 DEFINE_WAIT(wait);
Jens Axboefcb323c2019-10-24 12:39:47 -06008554
8555 spin_lock_irq(&ctx->inflight_lock);
8556 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
Jens Axboedfead8a2020-10-14 10:12:37 -06008557 if (files && (req->work.flags & IO_WQ_WORK_FILES) &&
Jens Axboe98447d62020-10-14 10:48:51 -06008558 req->work.identity->files != files)
Jens Axboe768134d2019-11-10 20:30:53 -07008559 continue;
8560 /* req is being completed, ignore */
8561 if (!refcount_inc_not_zero(&req->refs))
8562 continue;
8563 cancel_req = req;
8564 break;
Jens Axboefcb323c2019-10-24 12:39:47 -06008565 }
Jens Axboe768134d2019-11-10 20:30:53 -07008566 if (cancel_req)
Jens Axboefcb323c2019-10-24 12:39:47 -06008567 prepare_to_wait(&ctx->inflight_wait, &wait,
Jens Axboe768134d2019-11-10 20:30:53 -07008568 TASK_UNINTERRUPTIBLE);
Jens Axboefcb323c2019-10-24 12:39:47 -06008569 spin_unlock_irq(&ctx->inflight_lock);
8570
Jens Axboe768134d2019-11-10 20:30:53 -07008571 /* We need to keep going until we don't find a matching req */
8572 if (!cancel_req)
Jens Axboefcb323c2019-10-24 12:39:47 -06008573 break;
Pavel Begunkovbb175342020-08-20 11:33:35 +03008574 /* cancel this request, or head link requests */
8575 io_attempt_cancel(ctx, cancel_req);
8576 io_put_req(cancel_req);
Jens Axboe6200b0a2020-09-13 14:38:30 -06008577 /* cancellations _may_ trigger task work */
8578 io_run_task_work();
Jens Axboefcb323c2019-10-24 12:39:47 -06008579 schedule();
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008580 finish_wait(&ctx->inflight_wait, &wait);
Jens Axboefcb323c2019-10-24 12:39:47 -06008581 }
Jens Axboe76e1b642020-09-26 15:05:03 -06008582
8583 return true;
Jens Axboefcb323c2019-10-24 12:39:47 -06008584}
8585
Pavel Begunkov801dd572020-06-15 10:33:14 +03008586static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008587{
Pavel Begunkov801dd572020-06-15 10:33:14 +03008588 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8589 struct task_struct *task = data;
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008590
Jens Axboef3606e32020-09-22 08:18:24 -06008591 return io_task_match(req, task);
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008592}
8593
Jens Axboe0f212202020-09-13 13:09:39 -06008594static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8595 struct task_struct *task,
8596 struct files_struct *files)
8597{
8598 bool ret;
8599
8600 ret = io_uring_cancel_files(ctx, files);
8601 if (!files) {
8602 enum io_wq_cancel cret;
8603
8604 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
8605 if (cret != IO_WQ_CANCEL_NOTFOUND)
8606 ret = true;
8607
8608 /* SQPOLL thread does its own polling */
8609 if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
8610 while (!list_empty_careful(&ctx->iopoll_list)) {
8611 io_iopoll_try_reap_events(ctx);
8612 ret = true;
8613 }
8614 }
8615
8616 ret |= io_poll_remove_all(ctx, task);
8617 ret |= io_kill_timeouts(ctx, task);
8618 }
8619
8620 return ret;
8621}
8622
8623/*
8624 * We need to iteratively cancel requests, in case a request has dependent
 8625 * hard links. These persist even if a cancelation fails, so keep
8626 * looping until none are found.
8627 */
8628static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8629 struct files_struct *files)
8630{
8631 struct task_struct *task = current;
8632
Jens Axboe534ca6d2020-09-02 13:52:19 -06008633 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data)
8634 task = ctx->sq_data->thread;
Jens Axboe0f212202020-09-13 13:09:39 -06008635
8636 io_cqring_overflow_flush(ctx, true, task, files);
8637
8638 while (__io_uring_cancel_task_requests(ctx, task, files)) {
8639 io_run_task_work();
8640 cond_resched();
8641 }
8642}
8643
8644/*
8645 * Note that this task has used io_uring. We use it for cancelation purposes.
8646 */
8647static int io_uring_add_task_file(struct file *file)
8648{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008649 struct io_uring_task *tctx = current->io_uring;
8650
8651 if (unlikely(!tctx)) {
Jens Axboe0f212202020-09-13 13:09:39 -06008652 int ret;
8653
8654 ret = io_uring_alloc_task_context(current);
8655 if (unlikely(ret))
8656 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008657 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008658 }
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008659 if (tctx->last != file) {
8660 void *old = xa_load(&tctx->xa, (unsigned long)file);
Jens Axboe0f212202020-09-13 13:09:39 -06008661
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008662 if (!old) {
Jens Axboe0f212202020-09-13 13:09:39 -06008663 get_file(file);
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008664 xa_store(&tctx->xa, (unsigned long)file, file, GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06008665 }
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008666 tctx->last = file;
Jens Axboe0f212202020-09-13 13:09:39 -06008667 }
8668
8669 return 0;
8670}
8671
8672/*
8673 * Remove this io_uring_file -> task mapping.
8674 */
8675static void io_uring_del_task_file(struct file *file)
8676{
8677 struct io_uring_task *tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008678
8679 if (tctx->last == file)
8680 tctx->last = NULL;
Matthew Wilcox (Oracle)5e2ed8c2020-10-09 13:49:53 +01008681 file = xa_erase(&tctx->xa, (unsigned long)file);
Jens Axboe0f212202020-09-13 13:09:39 -06008682 if (file)
8683 fput(file);
8684}
8685
8686static void __io_uring_attempt_task_drop(struct file *file)
8687{
Matthew Wilcox (Oracle)5e2ed8c2020-10-09 13:49:53 +01008688 struct file *old = xa_load(&current->io_uring->xa, (unsigned long)file);
Jens Axboe0f212202020-09-13 13:09:39 -06008689
8690 if (old == file)
8691 io_uring_del_task_file(file);
8692}
8693
8694/*
8695 * Drop task note for this file if we're the only ones that hold it after
8696 * pending fput()
8697 */
8698static void io_uring_attempt_task_drop(struct file *file, bool exiting)
8699{
8700 if (!current->io_uring)
8701 return;
8702 /*
8703	 * fput() is pending, so f_count will be 2 if the only other ref is our
8704	 * potential task file note. If the task is exiting, drop regardless of count.
8705 */
8706 if (!exiting && atomic_long_read(&file->f_count) != 2)
8707 return;
8708
8709 __io_uring_attempt_task_drop(file);
8710}
8711
8712void __io_uring_files_cancel(struct files_struct *files)
8713{
8714 struct io_uring_task *tctx = current->io_uring;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008715 struct file *file;
8716 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06008717
8718 /* make sure overflow events are dropped */
8719 tctx->in_idle = true;
8720
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008721 xa_for_each(&tctx->xa, index, file) {
8722 struct io_ring_ctx *ctx = file->private_data;
Jens Axboe0f212202020-09-13 13:09:39 -06008723
8724 io_uring_cancel_task_requests(ctx, files);
8725 if (files)
8726 io_uring_del_task_file(file);
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008727 }
Jens Axboe0f212202020-09-13 13:09:39 -06008728}
8729
Jens Axboe0f212202020-09-13 13:09:39 -06008730/*
8731 * Find any io_uring fd that this task has registered or done IO on, and cancel
8732 * requests.
8733 */
8734void __io_uring_task_cancel(void)
8735{
8736 struct io_uring_task *tctx = current->io_uring;
8737 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06008738 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06008739
8740 /* make sure overflow events are dropped */
8741 tctx->in_idle = true;
8742
Jens Axboed8a6df12020-10-15 16:24:45 -06008743 do {
Jens Axboe0f212202020-09-13 13:09:39 -06008744 /* read completions before cancelations */
Jens Axboed8a6df12020-10-15 16:24:45 -06008745 inflight = percpu_counter_sum(&tctx->inflight);
8746 if (!inflight)
8747 break;
Jens Axboe0f212202020-09-13 13:09:39 -06008748 __io_uring_files_cancel(NULL);
8749
8750 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8751
8752 /*
8753 * If we've seen completions, retry. This avoids a race where
8754 * a completion comes in before we did prepare_to_wait().
8755 */
Jens Axboed8a6df12020-10-15 16:24:45 -06008756 if (inflight != percpu_counter_sum(&tctx->inflight))
Jens Axboe0f212202020-09-13 13:09:39 -06008757 continue;
Jens Axboe0f212202020-09-13 13:09:39 -06008758 schedule();
Jens Axboed8a6df12020-10-15 16:24:45 -06008759 } while (1);
Jens Axboe0f212202020-09-13 13:09:39 -06008760
8761 finish_wait(&tctx->wait, &wait);
8762 tctx->in_idle = false;
Jens Axboefcb323c2019-10-24 12:39:47 -06008763}
8764
8765static int io_uring_flush(struct file *file, void *data)
8766{
8767 struct io_ring_ctx *ctx = file->private_data;
8768
Jens Axboe6ab23142020-02-08 20:23:59 -07008769 /*
8770 * If the task is going away, cancel work it may have pending
8771 */
Pavel Begunkov801dd572020-06-15 10:33:14 +03008772 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
Jens Axboe0f212202020-09-13 13:09:39 -06008773 data = NULL;
Jens Axboe6ab23142020-02-08 20:23:59 -07008774
Jens Axboe0f212202020-09-13 13:09:39 -06008775 io_uring_cancel_task_requests(ctx, data);
8776 io_uring_attempt_task_drop(file, !data);
Jens Axboefcb323c2019-10-24 12:39:47 -06008777 return 0;
8778}
8779
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008780static void *io_uring_validate_mmap_request(struct file *file,
8781 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008782{
Jens Axboe2b188cc2019-01-07 10:46:33 -07008783 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008784 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008785 struct page *page;
8786 void *ptr;
8787
8788 switch (offset) {
8789 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00008790 case IORING_OFF_CQ_RING:
8791 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008792 break;
8793 case IORING_OFF_SQES:
8794 ptr = ctx->sq_sqes;
8795 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008796 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008797 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008798 }
8799
8800 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07008801 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008802 return ERR_PTR(-EINVAL);
8803
8804 return ptr;
8805}
8806
8807#ifdef CONFIG_MMU
8808
8809static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
8810{
8811 size_t sz = vma->vm_end - vma->vm_start;
8812 unsigned long pfn;
8813 void *ptr;
8814
8815 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
8816 if (IS_ERR(ptr))
8817 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008818
8819 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
8820 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
8821}
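/*
 * Illustrative userspace sketch of the offsets validated above; the sizes
 * come from the io_uring_params returned by io_uring_setup(), and the CQ
 * ring is mapped the same way at IORING_OFF_CQ_RING.
 *
 *	sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		       ring_fd, IORING_OFF_SQ_RING);
 *	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		    ring_fd, IORING_OFF_SQES);
 */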
8822
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008823#else /* !CONFIG_MMU */
8824
8825static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
8826{
8827 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
8828}
8829
8830static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
8831{
8832 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
8833}
8834
8835static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
8836 unsigned long addr, unsigned long len,
8837 unsigned long pgoff, unsigned long flags)
8838{
8839 void *ptr;
8840
8841 ptr = io_uring_validate_mmap_request(file, pgoff, len);
8842 if (IS_ERR(ptr))
8843 return PTR_ERR(ptr);
8844
8845 return (unsigned long) ptr;
8846}
8847
8848#endif /* !CONFIG_MMU */
8849
Jens Axboe90554202020-09-03 12:12:41 -06008850static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
8851{
8852 DEFINE_WAIT(wait);
8853
8854 do {
8855 if (!io_sqring_full(ctx))
8856 break;
8857
8858 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
8859
8860 if (!io_sqring_full(ctx))
8861 break;
8862
8863 schedule();
8864 } while (!signal_pending(current));
8865
8866 finish_wait(&ctx->sqo_sq_wait, &wait);
8867}
8868
Jens Axboe2b188cc2019-01-07 10:46:33 -07008869SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
8870 u32, min_complete, u32, flags, const sigset_t __user *, sig,
8871 size_t, sigsz)
8872{
8873 struct io_ring_ctx *ctx;
8874 long ret = -EBADF;
8875 int submitted = 0;
8876 struct fd f;
8877
Jens Axboe4c6e2772020-07-01 11:29:10 -06008878 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07008879
Jens Axboe90554202020-09-03 12:12:41 -06008880 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
8881 IORING_ENTER_SQ_WAIT))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008882 return -EINVAL;
8883
8884 f = fdget(fd);
8885 if (!f.file)
8886 return -EBADF;
8887
8888 ret = -EOPNOTSUPP;
8889 if (f.file->f_op != &io_uring_fops)
8890 goto out_fput;
8891
8892 ret = -ENXIO;
8893 ctx = f.file->private_data;
8894 if (!percpu_ref_tryget(&ctx->refs))
8895 goto out_fput;
8896
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008897 ret = -EBADFD;
8898 if (ctx->flags & IORING_SETUP_R_DISABLED)
8899 goto out;
8900
Jens Axboe6c271ce2019-01-10 11:22:30 -07008901 /*
8902 * For SQ polling, the thread will do all submissions and completions.
8903 * Just return the requested submit count, and wake the thread if
8904 * we were asked to.
8905 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06008906 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008907 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboec1edbf52019-11-10 16:56:04 -07008908 if (!list_empty_careful(&ctx->cq_overflow_list))
Jens Axboee6c8aa92020-09-28 13:10:13 -06008909 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Jens Axboe6c271ce2019-01-10 11:22:30 -07008910 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06008911 wake_up(&ctx->sq_data->wait);
Jens Axboe90554202020-09-03 12:12:41 -06008912 if (flags & IORING_ENTER_SQ_WAIT)
8913 io_sqpoll_wait_sq(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07008914 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06008915 } else if (to_submit) {
Jens Axboe0f212202020-09-13 13:09:39 -06008916 ret = io_uring_add_task_file(f.file);
8917 if (unlikely(ret))
8918 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008919 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06008920 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008921 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03008922
8923 if (submitted != to_submit)
8924 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008925 }
8926 if (flags & IORING_ENTER_GETEVENTS) {
8927 min_complete = min(min_complete, ctx->cq_entries);
8928
Xiaoguang Wang32b22442020-03-11 09:26:09 +08008929 /*
8930	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
8931	 * space applications don't need to poll for completion events
8932	 * themselves; they can rely on io_sq_thread to do that polling
8933	 * work, which can reduce cpu usage and uring_lock contention.
8934 */
8935 if (ctx->flags & IORING_SETUP_IOPOLL &&
8936 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03008937 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07008938 } else {
8939 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
8940 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008941 }
8942
Pavel Begunkov7c504e652019-12-18 19:53:45 +03008943out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03008944 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008945out_fput:
8946 fdput(f);
8947 return submitted ? submitted : ret;
8948}
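/*
 * Illustrative userspace sketch, not part of the kernel source: the SQPOLL
 * branch of io_uring_enter() above only needs to be called when the poller
 * thread has gone to sleep.  After publishing new SQ entries the application
 * checks IORING_SQ_NEED_WAKEUP in the mapped SQ flags word (a full memory
 * barrier between its SQ tail store and this load is assumed) and issues
 * IORING_ENTER_SQ_WAKEUP only if the flag is set.  'sq_flags' is assumed to
 * point at the word mapped via p->sq_off.flags.
 */
#include <linux/io_uring.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static void example_sqpoll_wakeup(int ring_fd, _Atomic unsigned *sq_flags)
{
	/* order the earlier SQ tail store against the flags load below */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load_explicit(sq_flags, memory_order_relaxed) &
	    IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}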
8949
Tobias Klauserbebdb652020-02-26 18:38:32 +01008950#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07008951static int io_uring_show_cred(int id, void *p, void *data)
8952{
8953 const struct cred *cred = p;
8954 struct seq_file *m = data;
8955 struct user_namespace *uns = seq_user_ns(m);
8956 struct group_info *gi;
8957 kernel_cap_t cap;
8958 unsigned __capi;
8959 int g;
8960
8961 seq_printf(m, "%5d\n", id);
8962 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
8963 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
8964 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
8965 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
8966 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
8967 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
8968 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
8969 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
8970 seq_puts(m, "\n\tGroups:\t");
8971 gi = cred->group_info;
8972 for (g = 0; g < gi->ngroups; g++) {
8973 seq_put_decimal_ull(m, g ? " " : "",
8974 from_kgid_munged(uns, gi->gid[g]));
8975 }
8976 seq_puts(m, "\n\tCapEff:\t");
8977 cap = cred->cap_effective;
8978 CAP_FOR_EACH_U32(__capi)
8979 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
8980 seq_putc(m, '\n');
8981 return 0;
8982}
8983
8984static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
8985{
Joseph Qidbbe9c62020-09-29 09:01:22 -06008986 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06008987 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07008988 int i;
8989
Jens Axboefad8e0d2020-09-28 08:57:48 -06008990 /*
8991 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
8992	 * since the fdinfo case grabs it in the opposite direction of normal use
8993 * cases. If we fail to get the lock, we just don't iterate any
8994 * structures that could be going away outside the io_uring mutex.
8995 */
8996 has_lock = mutex_trylock(&ctx->uring_lock);
8997
Joseph Qidbbe9c62020-09-29 09:01:22 -06008998 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
8999 sq = ctx->sq_data;
9000
9001 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9002 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009003 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009004 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009005 struct fixed_file_table *table;
9006 struct file *f;
9007
9008 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
9009 f = table->files[i & IORING_FILE_TABLE_MASK];
9010 if (f)
9011 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9012 else
9013 seq_printf(m, "%5u: <none>\n", i);
9014 }
9015 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009016 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009017 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9018
9019 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9020 (unsigned int) buf->len);
9021 }
Jens Axboefad8e0d2020-09-28 08:57:48 -06009022 if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009023 seq_printf(m, "Personalities:\n");
9024 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
9025 }
Jens Axboed7718a92020-02-14 22:23:12 -07009026 seq_printf(m, "PollList:\n");
9027 spin_lock_irq(&ctx->completion_lock);
9028 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9029 struct hlist_head *list = &ctx->cancel_hash[i];
9030 struct io_kiocb *req;
9031
9032 hlist_for_each_entry(req, list, hash_node)
9033 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9034 req->task->task_works != NULL);
9035 }
9036 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009037 if (has_lock)
9038 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009039}
9040
9041static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9042{
9043 struct io_ring_ctx *ctx = f->private_data;
9044
9045 if (percpu_ref_tryget(&ctx->refs)) {
9046 __io_uring_show_fdinfo(ctx, m);
9047 percpu_ref_put(&ctx->refs);
9048 }
9049}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009050#endif
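/*
 * Illustrative userspace sketch, not part of the kernel source: the fdinfo
 * output produced by __io_uring_show_fdinfo() above (SqThread, UserFiles,
 * UserBufs, Personalities, PollList, ...) can be read back through procfs
 * for debugging, provided the kernel has CONFIG_PROC_FS.
 */
#include <stdio.h>

static void example_dump_ring_fdinfo(int ring_fd)
{
	char path[64], line[256];
	FILE *fp;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", ring_fd);
	fp = fopen(path, "r");
	if (!fp)
		return;
	while (fgets(line, sizeof(line), fp))
		fputs(line, stderr);
	fclose(fp);
}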
Jens Axboe87ce9552020-01-30 08:25:34 -07009051
Jens Axboe2b188cc2019-01-07 10:46:33 -07009052static const struct file_operations io_uring_fops = {
9053 .release = io_uring_release,
Jens Axboefcb323c2019-10-24 12:39:47 -06009054 .flush = io_uring_flush,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009055 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009056#ifndef CONFIG_MMU
9057 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9058 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9059#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009060 .poll = io_uring_poll,
9061 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009062#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009063 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009064#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009065};
9066
9067static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9068 struct io_uring_params *p)
9069{
Hristo Venev75b28af2019-08-26 17:23:46 +00009070 struct io_rings *rings;
9071 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009072
Jens Axboebd740482020-08-05 12:58:23 -06009073 /* make sure these are sane, as we already accounted them */
9074 ctx->sq_entries = p->sq_entries;
9075 ctx->cq_entries = p->cq_entries;
9076
Hristo Venev75b28af2019-08-26 17:23:46 +00009077 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9078 if (size == SIZE_MAX)
9079 return -EOVERFLOW;
9080
9081 rings = io_mem_alloc(size);
9082 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009083 return -ENOMEM;
9084
Hristo Venev75b28af2019-08-26 17:23:46 +00009085 ctx->rings = rings;
9086 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9087 rings->sq_ring_mask = p->sq_entries - 1;
9088 rings->cq_ring_mask = p->cq_entries - 1;
9089 rings->sq_ring_entries = p->sq_entries;
9090 rings->cq_ring_entries = p->cq_entries;
9091 ctx->sq_mask = rings->sq_ring_mask;
9092 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009093
9094 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009095 if (size == SIZE_MAX) {
9096 io_mem_free(ctx->rings);
9097 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009098 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009099 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009100
9101 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009102 if (!ctx->sq_sqes) {
9103 io_mem_free(ctx->rings);
9104 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009105 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009106 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009107
Jens Axboe2b188cc2019-01-07 10:46:33 -07009108 return 0;
9109}
9110
9111/*
9112 * Allocate an anonymous fd; this is what constitutes the application-
9113 * visible backing of an io_uring instance. The application mmaps this
9114 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9115 * we have to tie this fd to a socket for file garbage collection purposes.
9116 */
9117static int io_uring_get_fd(struct io_ring_ctx *ctx)
9118{
9119 struct file *file;
9120 int ret;
9121
9122#if defined(CONFIG_UNIX)
9123 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9124 &ctx->ring_sock);
9125 if (ret)
9126 return ret;
9127#endif
9128
9129 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9130 if (ret < 0)
9131 goto err;
9132
9133 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9134 O_RDWR | O_CLOEXEC);
9135 if (IS_ERR(file)) {
Jens Axboe0f212202020-09-13 13:09:39 -06009136err_fd:
Jens Axboe2b188cc2019-01-07 10:46:33 -07009137 put_unused_fd(ret);
9138 ret = PTR_ERR(file);
9139 goto err;
9140 }
9141
9142#if defined(CONFIG_UNIX)
9143 ctx->ring_sock->file = file;
9144#endif
Jens Axboe0f212202020-09-13 13:09:39 -06009145 if (unlikely(io_uring_add_task_file(file))) {
9146 file = ERR_PTR(-ENOMEM);
9147 goto err_fd;
9148 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009149 fd_install(ret, file);
9150 return ret;
9151err:
9152#if defined(CONFIG_UNIX)
9153 sock_release(ctx->ring_sock);
9154 ctx->ring_sock = NULL;
9155#endif
9156 return ret;
9157}
9158
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009159static int io_uring_create(unsigned entries, struct io_uring_params *p,
9160 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009161{
9162 struct user_struct *user = NULL;
9163 struct io_ring_ctx *ctx;
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009164 bool limit_mem;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009165 int ret;
9166
Jens Axboe8110c1a2019-12-28 15:39:54 -07009167 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009168 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009169 if (entries > IORING_MAX_ENTRIES) {
9170 if (!(p->flags & IORING_SETUP_CLAMP))
9171 return -EINVAL;
9172 entries = IORING_MAX_ENTRIES;
9173 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009174
9175 /*
9176 * Use twice as many entries for the CQ ring. It's possible for the
9177 * application to drive a higher depth than the size of the SQ ring,
9178 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009179 * some flexibility in overcommitting a bit. If the application has
9180 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9181 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009182 */
9183 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009184 if (p->flags & IORING_SETUP_CQSIZE) {
9185 /*
9186 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9187 * to a power-of-two, if it isn't already. We do NOT impose
9188 * any cq vs sq ring sizing.
9189 */
Jens Axboe8110c1a2019-12-28 15:39:54 -07009190 if (p->cq_entries < p->sq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009191 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009192 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9193 if (!(p->flags & IORING_SETUP_CLAMP))
9194 return -EINVAL;
9195 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9196 }
Jens Axboe33a107f2019-10-04 12:10:03 -06009197 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9198 } else {
9199 p->cq_entries = 2 * p->sq_entries;
9200 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009201
9202 user = get_uid(current_user());
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009203 limit_mem = !capable(CAP_IPC_LOCK);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009204
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009205 if (limit_mem) {
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009206 ret = __io_account_mem(user,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009207 ring_pages(p->sq_entries, p->cq_entries));
9208 if (ret) {
9209 free_uid(user);
9210 return ret;
9211 }
9212 }
9213
9214 ctx = io_ring_ctx_alloc(p);
9215 if (!ctx) {
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009216 if (limit_mem)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009217 __io_unaccount_mem(user, ring_pages(p->sq_entries,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009218 p->cq_entries));
9219 free_uid(user);
9220 return -ENOMEM;
9221 }
9222 ctx->compat = in_compat_syscall();
Jens Axboe2b188cc2019-01-07 10:46:33 -07009223 ctx->user = user;
Jens Axboe0b8c0ec2019-12-02 08:50:00 -07009224 ctx->creds = get_current_cred();
Jens Axboe4ea33a92020-10-15 13:46:44 -06009225#ifdef CONFIG_AUDIT
9226 ctx->loginuid = current->loginuid;
9227 ctx->sessionid = current->sessionid;
9228#endif
Jens Axboe2aede0e2020-09-14 10:45:53 -06009229 ctx->sqo_task = get_task_struct(current);
9230
9231 /*
9232 * This is just grabbed for accounting purposes. When a process exits,
9233 * the mm is exited and dropped before the files, hence we need to hang
9234 * on to this mm purely for the purposes of being able to unaccount
9235 * memory (locked/pinned vm). It's not used for anything else.
9236 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009237 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009238 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009239
Dennis Zhou91d8f512020-09-16 13:41:05 -07009240#ifdef CONFIG_BLK_CGROUP
9241 /*
9242 * The sq thread will belong to the original cgroup it was inited in.
9243 * If the cgroup goes offline (e.g. disabling the io controller), then
9244 * issued bios will be associated with the closest cgroup later in the
9245 * block layer.
9246 */
9247 rcu_read_lock();
9248 ctx->sqo_blkcg_css = blkcg_css();
9249 ret = css_tryget_online(ctx->sqo_blkcg_css);
9250 rcu_read_unlock();
9251 if (!ret) {
9252 /* don't init against a dying cgroup, have the user try again */
9253 ctx->sqo_blkcg_css = NULL;
9254 ret = -ENODEV;
9255 goto err;
9256 }
9257#endif
Jens Axboe6c271ce2019-01-10 11:22:30 -07009258
Jens Axboe2b188cc2019-01-07 10:46:33 -07009259 /*
9260 * Account memory _before_ installing the file descriptor. Once
9261 * the descriptor is installed, it can get closed at any time. Also
Jens Axboe2b188cc2019-01-07 10:46:33 -07009262 * do this before hitting the general error path, as ring freeing
Hristo Venev75b28af2019-08-26 17:23:46 +00009263 * will un-account as well.
9264 */
9265 io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
9266 ACCT_LOCKED);
9267 ctx->limit_mem = limit_mem;
9268
9269 ret = io_allocate_scq_urings(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009270 if (ret)
9271 goto err;
Hristo Venev75b28af2019-08-26 17:23:46 +00009272
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009273 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009274 if (ret)
9275 goto err;
9276
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009277 if (!(p->flags & IORING_SETUP_R_DISABLED))
9278 io_sq_offload_start(ctx);
9279
Jens Axboe2b188cc2019-01-07 10:46:33 -07009280 memset(&p->sq_off, 0, sizeof(p->sq_off));
9281 p->sq_off.head = offsetof(struct io_rings, sq.head);
9282 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9283 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9284 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9285 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9286 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9287 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
9288
9289 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009290 p->cq_off.head = offsetof(struct io_rings, cq.head);
9291 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9292 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9293 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9294 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9295 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009296 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009297
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009298 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9299 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009300 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
9301 IORING_FEAT_POLL_32BITS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009302
9303 if (copy_to_user(params, p, sizeof(*p))) {
9304 ret = -EFAULT;
9305 goto err;
9306 }
Jens Axboed1719f72020-07-30 13:43:53 -06009307
9308 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009309 * Install ring fd as the very last thing, so we don't risk someone
9310 * having closed it before we finish setup
9311 */
9312 ret = io_uring_get_fd(ctx);
9313 if (ret < 0)
9314 goto err;
9315
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009316 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009317 return ret;
9318err:
9319 io_ring_ctx_wait_and_kill(ctx);
9320 return ret;
9321}
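/*
 * Illustrative userspace sketch, not part of the kernel source: because
 * io_uring_create() above rounds the SQ size up to a power of two and
 * defaults the CQ ring to twice that (or rounds/clamps an explicit
 * IORING_SETUP_CQSIZE request), the application must use the sizes written
 * back into the params structure rather than the values it asked for.
 * Assumes the raw io_uring_setup syscall number from <sys/syscall.h>.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_setup_with_cqsize(unsigned entries, unsigned cq_entries,
				     struct io_uring_params *p)
{
	int ring_fd;

	memset(p, 0, sizeof(*p));
	p->flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
	p->cq_entries = cq_entries;

	ring_fd = syscall(__NR_io_uring_setup, entries, p);
	if (ring_fd < 0)
		return ring_fd;

	/* p->sq_entries and p->cq_entries now hold the rounded/clamped sizes */
	return ring_fd;
}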
9322
9323/*
9324 * Sets up an io_uring context and returns the fd. The application asks
9325 * for a ring size; we return the actual sq/cq ring sizes (among other
9326 * things) in the params structure passed in.
9327 */
9328static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9329{
9330 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009331 int i;
9332
9333 if (copy_from_user(&p, params, sizeof(p)))
9334 return -EFAULT;
9335 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9336 if (p.resv[i])
9337 return -EINVAL;
9338 }
9339
Jens Axboe6c271ce2019-01-10 11:22:30 -07009340 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009341 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009342 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9343 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009344 return -EINVAL;
9345
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009346 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009347}
9348
9349SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9350 struct io_uring_params __user *, params)
9351{
9352 return io_uring_setup(entries, params);
9353}
9354
Jens Axboe66f4af92020-01-16 15:36:52 -07009355static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9356{
9357 struct io_uring_probe *p;
9358 size_t size;
9359 int i, ret;
9360
9361 size = struct_size(p, ops, nr_args);
9362 if (size == SIZE_MAX)
9363 return -EOVERFLOW;
9364 p = kzalloc(size, GFP_KERNEL);
9365 if (!p)
9366 return -ENOMEM;
9367
9368 ret = -EFAULT;
9369 if (copy_from_user(p, arg, size))
9370 goto out;
9371 ret = -EINVAL;
9372 if (memchr_inv(p, 0, size))
9373 goto out;
9374
9375 p->last_op = IORING_OP_LAST - 1;
9376 if (nr_args > IORING_OP_LAST)
9377 nr_args = IORING_OP_LAST;
9378
9379 for (i = 0; i < nr_args; i++) {
9380 p->ops[i].op = i;
9381 if (!io_op_defs[i].not_supported)
9382 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9383 }
9384 p->ops_len = i;
9385
9386 ret = 0;
9387 if (copy_to_user(arg, p, size))
9388 ret = -EFAULT;
9389out:
9390 kfree(p);
9391 return ret;
9392}
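/*
 * Illustrative userspace sketch, not part of the kernel source: querying
 * opcode support through IORING_REGISTER_PROBE, which io_probe() above
 * services.  The kernel sets IO_URING_OP_SUPPORTED in ops[].flags for every
 * implemented opcode; 256 entries is the maximum the register path accepts.
 * Assumes the raw io_uring_register syscall number from <sys/syscall.h>.
 */
#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_opcode_supported(int ring_fd, int opcode)
{
	size_t len = sizeof(struct io_uring_probe) +
		     256 * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *probe = calloc(1, len);
	int supported = 0;

	if (!probe)
		return 0;
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    probe, 256) == 0 &&
	    opcode <= probe->last_op &&
	    (probe->ops[opcode].flags & IO_URING_OP_SUPPORTED))
		supported = 1;

	free(probe);
	return supported;
}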
9393
Jens Axboe071698e2020-01-28 10:04:42 -07009394static int io_register_personality(struct io_ring_ctx *ctx)
9395{
Jens Axboe1e6fa522020-10-15 08:46:24 -06009396 struct io_identity *id;
9397 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009398
Jens Axboe1e6fa522020-10-15 08:46:24 -06009399 id = kmalloc(sizeof(*id), GFP_KERNEL);
9400 if (unlikely(!id))
9401 return -ENOMEM;
9402
9403 io_init_identity(id);
9404 id->creds = get_current_cred();
9405
9406 ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
9407 if (ret < 0) {
9408 put_cred(id->creds);
9409 kfree(id);
9410 }
9411 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009412}
9413
9414static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
9415{
Jens Axboe1e6fa522020-10-15 08:46:24 -06009416 struct io_identity *iod;
Jens Axboe071698e2020-01-28 10:04:42 -07009417
Jens Axboe1e6fa522020-10-15 08:46:24 -06009418 iod = idr_remove(&ctx->personality_idr, id);
9419 if (iod) {
9420 put_cred(iod->creds);
9421 if (refcount_dec_and_test(&iod->count))
9422 kfree(iod);
Jens Axboe071698e2020-01-28 10:04:42 -07009423 return 0;
9424 }
9425
9426 return -EINVAL;
9427}
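/*
 * Illustrative userspace sketch, not part of the kernel source:
 * IORING_REGISTER_PERSONALITY (serviced by io_register_personality() above)
 * snapshots the calling task's credentials and returns an id.  A request can
 * then run under those credentials by storing the id in sqe->personality;
 * IORING_UNREGISTER_PERSONALITY takes the id in the nr_args slot.  Assumes
 * the raw io_uring_register syscall number from <sys/syscall.h>.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_use_personality(int ring_fd, struct io_uring_sqe *sqe)
{
	int id = syscall(__NR_io_uring_register, ring_fd,
			 IORING_REGISTER_PERSONALITY, NULL, 0);

	if (id < 0)
		return id;

	sqe->personality = id;	/* this SQE runs with the registered creds */

	/* later, drop the credential snapshot again */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_UNREGISTER_PERSONALITY, NULL, id);
}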
9428
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009429static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9430 unsigned int nr_args)
9431{
9432 struct io_uring_restriction *res;
9433 size_t size;
9434 int i, ret;
9435
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009436 /* Restrictions allowed only if rings started disabled */
9437 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9438 return -EBADFD;
9439
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009440 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009441 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009442 return -EBUSY;
9443
9444 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9445 return -EINVAL;
9446
9447 size = array_size(nr_args, sizeof(*res));
9448 if (size == SIZE_MAX)
9449 return -EOVERFLOW;
9450
9451 res = memdup_user(arg, size);
9452 if (IS_ERR(res))
9453 return PTR_ERR(res);
9454
9455 ret = 0;
9456
9457 for (i = 0; i < nr_args; i++) {
9458 switch (res[i].opcode) {
9459 case IORING_RESTRICTION_REGISTER_OP:
9460 if (res[i].register_op >= IORING_REGISTER_LAST) {
9461 ret = -EINVAL;
9462 goto out;
9463 }
9464
9465 __set_bit(res[i].register_op,
9466 ctx->restrictions.register_op);
9467 break;
9468 case IORING_RESTRICTION_SQE_OP:
9469 if (res[i].sqe_op >= IORING_OP_LAST) {
9470 ret = -EINVAL;
9471 goto out;
9472 }
9473
9474 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9475 break;
9476 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9477 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9478 break;
9479 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9480 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9481 break;
9482 default:
9483 ret = -EINVAL;
9484 goto out;
9485 }
9486 }
9487
9488out:
9489 /* Reset all restrictions if an error happened */
9490 if (ret != 0)
9491 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9492 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009493 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009494
9495 kfree(res);
9496 return ret;
9497}
9498
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009499static int io_register_enable_rings(struct io_ring_ctx *ctx)
9500{
9501 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9502 return -EBADFD;
9503
9504 if (ctx->restrictions.registered)
9505 ctx->restricted = 1;
9506
9507 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9508
9509 io_sq_offload_start(ctx);
9510
9511 return 0;
9512}
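/*
 * Illustrative userspace sketch, not part of the kernel source: the intended
 * flow for the two helpers above.  A ring created with
 * IORING_SETUP_R_DISABLED starts disabled; its creator registers a single
 * restriction set and then enables the ring, after which only the allowed
 * SQE opcodes (and register opcodes, if restricted) are accepted.  Assumes
 * the raw io_uring_register syscall number from <sys/syscall.h>.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_restrict_and_enable(int ring_fd)
{
	struct io_uring_restriction res[2];
	int ret;

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_READV;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_WRITEV;

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RESTRICTIONS, res, 2);
	if (ret < 0)
		return ret;

	/* lift IORING_SETUP_R_DISABLED; the restrictions now take effect */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}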
9513
Jens Axboe071698e2020-01-28 10:04:42 -07009514static bool io_register_op_must_quiesce(int op)
9515{
9516 switch (op) {
9517 case IORING_UNREGISTER_FILES:
9518 case IORING_REGISTER_FILES_UPDATE:
9519 case IORING_REGISTER_PROBE:
9520 case IORING_REGISTER_PERSONALITY:
9521 case IORING_UNREGISTER_PERSONALITY:
9522 return false;
9523 default:
9524 return true;
9525 }
9526}
9527
Jens Axboeedafcce2019-01-09 09:16:05 -07009528static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9529 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06009530 __releases(ctx->uring_lock)
9531 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07009532{
9533 int ret;
9534
Jens Axboe35fa71a2019-04-22 10:23:23 -06009535 /*
9536	 * We're inside the ring mutex; if the ref is already dying, then
9537 * someone else killed the ctx or is already going through
9538 * io_uring_register().
9539 */
9540 if (percpu_ref_is_dying(&ctx->refs))
9541 return -ENXIO;
9542
Jens Axboe071698e2020-01-28 10:04:42 -07009543 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009544 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06009545
Jens Axboe05f3fb32019-12-09 11:22:50 -07009546 /*
9547 * Drop uring mutex before waiting for references to exit. If
9548 * another thread is currently inside io_uring_enter() it might
9549 * need to grab the uring_lock to make progress. If we hold it
9550 * here across the drain wait, then we can deadlock. It's safe
9551 * to drop the mutex here, since no new references will come in
9552 * after we've killed the percpu ref.
9553 */
9554 mutex_unlock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009555 do {
9556 ret = wait_for_completion_interruptible(&ctx->ref_comp);
9557 if (!ret)
9558 break;
Jens Axboeed6930c2020-10-08 19:09:46 -06009559 ret = io_run_task_work_sig();
9560 if (ret < 0)
9561 break;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009562 } while (1);
9563
Jens Axboe05f3fb32019-12-09 11:22:50 -07009564 mutex_lock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009565
Jens Axboec1503682020-01-08 08:26:07 -07009566 if (ret) {
9567 percpu_ref_resurrect(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009568 goto out_quiesce;
9569 }
9570 }
9571
9572 if (ctx->restricted) {
9573 if (opcode >= IORING_REGISTER_LAST) {
9574 ret = -EINVAL;
9575 goto out;
9576 }
9577
9578 if (!test_bit(opcode, ctx->restrictions.register_op)) {
9579 ret = -EACCES;
Jens Axboec1503682020-01-08 08:26:07 -07009580 goto out;
9581 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07009582 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009583
9584 switch (opcode) {
9585 case IORING_REGISTER_BUFFERS:
9586 ret = io_sqe_buffer_register(ctx, arg, nr_args);
9587 break;
9588 case IORING_UNREGISTER_BUFFERS:
9589 ret = -EINVAL;
9590 if (arg || nr_args)
9591 break;
9592 ret = io_sqe_buffer_unregister(ctx);
9593 break;
Jens Axboe6b063142019-01-10 22:13:58 -07009594 case IORING_REGISTER_FILES:
9595 ret = io_sqe_files_register(ctx, arg, nr_args);
9596 break;
9597 case IORING_UNREGISTER_FILES:
9598 ret = -EINVAL;
9599 if (arg || nr_args)
9600 break;
9601 ret = io_sqe_files_unregister(ctx);
9602 break;
Jens Axboec3a31e62019-10-03 13:59:56 -06009603 case IORING_REGISTER_FILES_UPDATE:
9604 ret = io_sqe_files_update(ctx, arg, nr_args);
9605 break;
Jens Axboe9b402842019-04-11 11:45:41 -06009606 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -07009607 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -06009608 ret = -EINVAL;
9609 if (nr_args != 1)
9610 break;
9611 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -07009612 if (ret)
9613 break;
9614 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
9615 ctx->eventfd_async = 1;
9616 else
9617 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -06009618 break;
9619 case IORING_UNREGISTER_EVENTFD:
9620 ret = -EINVAL;
9621 if (arg || nr_args)
9622 break;
9623 ret = io_eventfd_unregister(ctx);
9624 break;
Jens Axboe66f4af92020-01-16 15:36:52 -07009625 case IORING_REGISTER_PROBE:
9626 ret = -EINVAL;
9627 if (!arg || nr_args > 256)
9628 break;
9629 ret = io_probe(ctx, arg, nr_args);
9630 break;
Jens Axboe071698e2020-01-28 10:04:42 -07009631 case IORING_REGISTER_PERSONALITY:
9632 ret = -EINVAL;
9633 if (arg || nr_args)
9634 break;
9635 ret = io_register_personality(ctx);
9636 break;
9637 case IORING_UNREGISTER_PERSONALITY:
9638 ret = -EINVAL;
9639 if (arg)
9640 break;
9641 ret = io_unregister_personality(ctx, nr_args);
9642 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009643 case IORING_REGISTER_ENABLE_RINGS:
9644 ret = -EINVAL;
9645 if (arg || nr_args)
9646 break;
9647 ret = io_register_enable_rings(ctx);
9648 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009649 case IORING_REGISTER_RESTRICTIONS:
9650 ret = io_register_restrictions(ctx, arg, nr_args);
9651 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07009652 default:
9653 ret = -EINVAL;
9654 break;
9655 }
9656
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009657out:
Jens Axboe071698e2020-01-28 10:04:42 -07009658 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009659 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -07009660 percpu_ref_reinit(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009661out_quiesce:
Jens Axboe0f158b42020-05-14 17:18:39 -06009662 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -07009663 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009664 return ret;
9665}
9666
9667SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
9668 void __user *, arg, unsigned int, nr_args)
9669{
9670 struct io_ring_ctx *ctx;
9671 long ret = -EBADF;
9672 struct fd f;
9673
9674 f = fdget(fd);
9675 if (!f.file)
9676 return -EBADF;
9677
9678 ret = -EOPNOTSUPP;
9679 if (f.file->f_op != &io_uring_fops)
9680 goto out_fput;
9681
9682 ctx = f.file->private_data;
9683
9684 mutex_lock(&ctx->uring_lock);
9685 ret = __io_uring_register(ctx, opcode, arg, nr_args);
9686 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009687 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
9688 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07009689out_fput:
9690 fdput(f);
9691 return ret;
9692}
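/*
 * Illustrative userspace sketch, not part of the kernel source: one of the
 * simpler opcodes dispatched by __io_uring_register() above.  A registered
 * eventfd is signalled when completions are posted (the
 * IORING_REGISTER_EVENTFD_ASYNC variant restricts that to completions that
 * happen out of line), which lets the ring be folded into an epoll/poll
 * based event loop.  Assumes the raw io_uring_register syscall number from
 * <sys/syscall.h>.
 */
#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_register_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	if (efd < 0)
		return efd;

	/* nr_args must be 1; arg points at the eventfd descriptor */
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
		    &efd, 1) < 0) {
		close(efd);
		return -1;
	}
	return efd;
}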
9693
Jens Axboe2b188cc2019-01-07 10:46:33 -07009694static int __init io_uring_init(void)
9695{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009696#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
9697 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
9698 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
9699} while (0)
9700
9701#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
9702 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
9703 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
9704 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
9705 BUILD_BUG_SQE_ELEM(1, __u8, flags);
9706 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
9707 BUILD_BUG_SQE_ELEM(4, __s32, fd);
9708 BUILD_BUG_SQE_ELEM(8, __u64, off);
9709 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
9710 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009711 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009712 BUILD_BUG_SQE_ELEM(24, __u32, len);
9713 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
9714 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
9715 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
9716 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +08009717 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
9718 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009719 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
9720 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
9721 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
9722 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
9723 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
9724 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
9725 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
9726 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009727 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009728 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
9729 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
9730 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009731 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009732
Jens Axboed3656342019-12-18 09:50:26 -07009733 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -07009734 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009735 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
9736 return 0;
9737};
9738__initcall(io_uring_init);