// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>
#include <linux/task_work.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>
#include <linux/cpu.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct nsproxy *restore_nsproxy;
	struct fs_struct *restore_fs;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	struct hlist_node cpuhp_node;

	refcount_t use_refs;
};

static enum cpuhp_state io_wq_online;

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}

/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		raw_spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		current->nsproxy = worker->restore_nsproxy;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			raw_spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

#ifdef CONFIG_BLK_CGROUP
	if (worker->blkcg_css) {
		kthread_associate_blkcg(NULL);
		worker->blkcg_css = NULL;
	}
#endif
	if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	return dropped_lock;
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

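/*
 * A worker is exiting: drop our reference, wait out any short-lived ones,
 * unhook the worker from the wqe lists, fix up the accounting, and release
 * the wq reference held on its behalf.
 */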
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		raw_spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_nsproxy = current->nsproxy;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Worker will start processing some work. Move it to the busy list if
 * it's currently on the freelist.
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}

/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}

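/*
 * Switch the worker to the mm of the work item's identity. If we can't
 * grab a reference to that mm, flag the work for cancellation instead.
 */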
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	if (mmget_not_zero(work->identity->mm)) {
		kthread_use_mm(work->identity->mm);
		worker->mm = work->identity->mm;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}

static inline void io_wq_switch_blkcg(struct io_worker *worker,
				      struct io_wq_work *work)
{
#ifdef CONFIG_BLK_CGROUP
	if (!(work->flags & IO_WQ_WORK_BLKCG))
		return;
	if (work->identity->blkcg_css != worker->blkcg_css) {
		kthread_associate_blkcg(work->identity->blkcg_css);
		worker->blkcg_css = work->identity->blkcg_css;
	}
#endif
}

static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->identity->creds);

	worker->cur_creds = work->identity->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}

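/*
 * Adopt the context captured in work->identity (files/nsproxy, fs, mm,
 * creds, RLIMIT_FSIZE, blkcg and audit ids) before running the work item,
 * guided by the IO_WQ_WORK_* flags set by the submitter.
 */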
static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if ((work->flags & IO_WQ_WORK_FILES) &&
	    current->files != work->identity->files) {
		task_lock(current);
		current->files = work->identity->files;
		current->nsproxy = work->identity->nsproxy;
		task_unlock(current);
	}
	if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
		current->fs = work->identity->fs;
	if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if ((work->flags & IO_WQ_WORK_CREDS) &&
	    worker->cur_creds != work->identity->creds)
		io_wq_switch_creds(worker, work);
	if (work->flags & IO_WQ_WORK_FSIZE)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
	else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	io_wq_switch_blkcg(worker, work);
#ifdef CONFIG_AUDIT
	current->loginuid = work->identity->loginuid;
	current->sessionid = work->identity->sessionid;
#endif
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		/* flush pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);
		cond_resched();
	}

#ifdef CONFIG_AUDIT
	current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
	current->sessionid = AUDIT_SID_UNSET;
#endif

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

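/*
 * Main work-processing loop: pull work off the wqe list (respecting the
 * hashed-work serialisation), impersonate the submitter, run the whole
 * dependent link, and re-enqueue anything that must go back on the list.
 * Called with wqe->lock held; the lock is dropped while running work.
 */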
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work, *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);
			io_impersonate_work(worker, work);
			/*
			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
			 * work, the worker function will do the right thing.
			 */
			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
				work->flags |= IO_WQ_WORK_CANCEL;

			old_work = work;
			linked = wq->do_work(work);

			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				raw_spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}

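/*
 * Worker thread main loop: run queued work when there is any, otherwise
 * idle on the free list and time out after WORKER_IDLE_TIMEOUT unless
 * this is the fixed worker for the node.
 */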
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		raw_spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	raw_spin_unlock_irq(&wqe->lock);
}

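/*
 * Create a new worker kthread for the given accounting class (bound or
 * unbound), bind it to the wqe's NUMA node and add it to the free list.
 */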
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}
	kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	refcount_inc(&wq->refs);
	wake_up_process(worker->task);
	return true;
}

static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, 1);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			continue;
		set_bit(IO_WQ_BIT_ERROR, &wq->state);
		set_bit(IO_WQ_BIT_EXIT, &wq->state);
		goto out;
	}

	complete(&wq->done);

	while (!kthread_should_stop()) {
		if (current->task_works)
			task_work_run();

		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			raw_spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			raw_spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (current->task_works)
		task_work_run();

out:
	if (refcount_dec_and_test(&wq->refs)) {
		complete(&wq->done);
		return 0;
	}
	/* if ERROR is set and we get here, we have workers to wake */
	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
		rcu_read_lock();
		for_each_node(node)
			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
		rcu_read_unlock();
	}
	return 0;
}

static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work = wq->do_work(work);
		wq->free_work(old_work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}

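/*
 * Queue work on this wqe and wake (or have the manager create) a worker
 * to run it. Unbound work that exceeds the allowed concurrency is
 * cancelled straight away via io_run_cancel().
 */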
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
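
/*
 * Illustrative (hypothetical) caller, serialising buffered writes that
 * target the same inode:
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 */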

void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope; the caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&wqe->work_list, &work->list, prev);
}

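/*
 * Cancel matching work that is still sitting on the pending list: remove
 * it and run it once with IO_WQ_WORK_CANCEL set, so the normal
 * do_work/free_work path performs the cleanup.
 */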
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
{
	return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
}

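/*
 * Set up an io_wq: allocate the per-node wqe structures, register for CPU
 * hotplug callbacks, start the manager thread and wait for it to create
 * the fixed bound worker on each online node.
 */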
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work || data->do_work != wq->do_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}

static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}

struct task_struct *io_wq_get_task(struct io_wq *wq)
{
	return wq->manager;
}

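/*
 * Re-apply the worker's NUMA node affinity; called for each worker from
 * the CPU hotplug online callback below, so a newly onlined CPU in the
 * node becomes usable again.
 */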
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);