// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

/*
 * RCU callback to free an icq.  ioc_destroy_icq() stashes the icq_cache
 * pointer in the icq itself because the queue, and with it the elevator
 * that owns the cache, may already be gone by the time this runs.
 */
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.  Because ioc->lock
 * nests inside the queue_lock (see ioc_create_icq()), a thread already
 * holding ioc->lock may only trylock the queue_lock; on contention it
 * must drop ioc->lock and re-acquire both in the correct order.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);
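
/*
 * Note: put_io_context() accepts a NULL @ioc, and because the final
 * release is punted to a workqueue it is safe to call while holding a
 * queue_lock, which is exactly why the slow path above is asynchronous.
 */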

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Put an active reference to an ioc.  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
static void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

/*
 * Destroy all icqs on a list spliced off a queue's icq_list.  The RCU
 * read lock keeps each icq valid between the unlocked list access and
 * taking its ioc->lock; entries another path already destroyed
 * (ICQ_DESTROYED) were unlinked by ioc_destroy_icq() and are skipped.
 */
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}
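
/*
 * ioc_clear_queue() is typically called when @q parts ways with its
 * elevator (e.g. on elevator switch or queue teardown), so that no icq
 * outlives the icq_cache of the scheduler that allocated it.
 */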

static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
	return ioc;
}

static struct io_context *create_task_io_context(struct task_struct *task,
		gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = alloc_io_context(gfp_flags, node);
	if (!ioc)
		return NULL;

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ioc = task->io_context;
	if (ioc)
		get_io_context(ioc);
	task_unlock(task);
	return ioc;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	task_lock(task);
	ioc = task->io_context;
	if (unlikely(!ioc)) {
		task_unlock(task);
		return create_task_io_context(task, gfp_flags, node);
	}
	get_io_context(ioc);
	task_unlock(task);
	return ioc;
}
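
/*
 * Example (sketch, not taken from an in-tree caller): pairing
 * get_task_io_context() with put_io_context() to peek at another task's
 * io priority.  @task is assumed to be a valid, pinned task_struct.
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		unsigned short ioprio = ioc->ioprio;
 *
 *		put_io_context(ioc);
 *	}
 */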

int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		atomic_long_inc(&ioc->refcount);
		atomic_inc(&ioc->active_ref);
		atomic_inc(&ioc->nr_tasks);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}
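
/*
 * __copy_io() runs in the fork path once the child's task_struct exists
 * (via the copy_io() wrapper, which skips it when the parent has no
 * io_context): with CLONE_IO the parent's io_context is shared outright,
 * otherwise only a valid ioprio is propagated into a fresh io_context.
 */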

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure io_cq linking %current->io_context and @q exists.  If the icq
 * doesn't exist, it will be created using GFP_ATOMIC.
 *
 * The caller is responsible for ensuring the io_context won't go away
 * and that @q is alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
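
/*
 * Both the icq allocation and the radix tree preload above use
 * GFP_ATOMIC, so ioc_create_icq() never sleeps and can be called from
 * the request setup path via ioc_find_get_icq() below.
 */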

/**
 * ioc_find_get_icq - get the io_cq for the current task on a queue
 * @q: request_queue of interest
 *
 * Return the io_cq linking %current's io_context and @q, creating the
 * io_context and/or the icq as needed.  On success the returned icq
 * holds a reference on the io_context; returns NULL on allocation
 * failure.
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = create_task_io_context(current, GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;
	} else {
		get_io_context(ioc);

		spin_lock_irq(&q->queue_lock);
		icq = ioc_lookup_icq(ioc, q);
		spin_unlock_irq(&q->queue_lock);
	}

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
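
/*
 * Example (sketch): how an I/O scheduler might associate an icq with a
 * request from its ->prepare_request elevator hook; my_prepare_request
 * is a hypothetical hook, not an in-tree one.
 *
 *	static void my_prepare_request(struct request *rq)
 *	{
 *		rq->elv.icq = ioc_find_get_icq(rq->q);
 *	}
 */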

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);