/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

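/*
 * Free the per-hctx scheduler data of every hardware queue, calling the
 * scheduler's exit handler first if one was supplied.
 */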
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

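/*
 * Tie @rq to the io_context of the task that submitted @bio: look up the
 * io_cq for this queue (creating it if needed), grab a reference on the
 * io_context, and record the icq in the request.
 */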
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. The flag is checked on
 * completion (see blk_mq_sched_restart()) so the queue gets re-run once
 * resources free up.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

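/*
 * Re-run a hardware queue if a restart was marked on it, clearing the
 * flag first. The run is asynchronous, so the caller isn't blocked.
 */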
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.mq.has_work &&
		    !e->type->ops.mq.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}

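/* Return the software queue that follows @ctx on @hctx, wrapping around. */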
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}

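/*
 * Main dispatch path for a hardware queue: drain leftovers on
 * hctx->dispatch first, then pull further work from the elevator or the
 * software queues, depending on what the scheduler provides and how busy
 * the device is.
 */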
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queue if busy */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

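/*
 * Let the elevator decide whether @bio can be merged into an existing
 * request, and carry out the merge. If the grown request could itself be
 * combined with a neighbouring request, *merged_request is set to the
 * request the caller should free.
 */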
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

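/*
 * Bio merge entry point used before allocating a new request: defer to the
 * scheduler's bio_merge hook if it has one, otherwise fall back to the
 * per-software-queue merge above.
 */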
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
	    !list_empty_careful(&ctx->rq_list)) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

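/* At insert time, try to merge @rq with a request already in the elevator. */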
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

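/* Tracepoint helper for schedulers to call once they have queued a request. */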
void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

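/*
 * Decide whether @rq must bypass the scheduler: a request that is part of
 * a flush sequence goes straight onto the hctx dispatch list.
 */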
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

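/*
 * Insert one request, routing it through the flush machinery, the dispatch
 * list, the elevator, or the software queue as appropriate, and optionally
 * kick the hardware queue afterwards.
 */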
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	/* a flush rq bound for the flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

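/*
 * Insert a list of requests from one software queue context, either via
 * the elevator's insert_requests hook or directly into the software queue.
 */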
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

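/* Free the scheduler tag map (and the requests backing it) for one hctx. */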
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

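/*
 * Allocate a scheduler tag map sized to q->nr_requests for one hardware
 * queue, along with the requests that back it.
 */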
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

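/* Tear down the scheduler tags of every hardware queue. */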
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

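/*
 * Set up scheduler state for a single hardware queue: sched tags, the
 * elevator's per-hctx init hook, and the debugfs entries.
 */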
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

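/* Undo blk_mq_sched_init_hctx(): debugfs, per-hctx scheduler data, tags. */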
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

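/*
 * Switch the queue to elevator @e (or to "none" if @e is NULL): size
 * q->nr_requests, allocate per-hctx scheduler tags, and run the
 * elevator's init hooks.
 */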
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

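/* Tear down the active scheduler: per-hctx state, debugfs, tags. */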
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}
569}