/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

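/*
 * A note on the bucketing below, inferred from the code rather than stated
 * in the original file: poll statistics are kept in two buckets per
 * power-of-two size class starting at 512 bytes (hence the "- 9"), with
 * the data direction selecting between the pair. For example, a 4096-byte
 * read maps to bucket 0 + 2 * (ilog2(4096) - 9) = 6.
 */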
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	/*
	 * index[0] counts the specific partition that was asked for. index[1]
	 * counts the ones that are active on the whole device, so increment
	 * that if mi->part is indeed a partition, and not a whole device.
	 */
	if (rq->part == mi->part)
		mi->inflight[0]++;
	if (mi->part->partno)
		mi->inflight[1]++;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
	struct mq_inflight *mi = priv;

	if (rq->part == mi->part)
		mi->inflight[rq_data_dir(rq)]++;
}

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
			blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	if (!q->mq_ops)
		blk_drain_queue(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
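
/*
 * Freeze/unfreeze usage, a minimal hypothetical sketch (not taken from this
 * file): drivers bracket updates that must not race with in-flight requests:
 *
 *	blk_mq_freeze_queue(q);
 *	... safely update queue or tag set data structures ...
 *	blk_mq_unfreeze_queue(q);
 *
 * The calls nest via mq_freeze_depth, so only the outermost pair actually
 * kills and reinitializes q_usage_counter.
 */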

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, no dispatch
 * can happen until the queue is unquiesced via blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
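
/*
 * Quiesce/unquiesce usage, a minimal hypothetical sketch (not taken from
 * this file): unlike freezing, quiescing only pauses dispatch and does not
 * block request allocation:
 *
 *	blk_mq_quiesce_queue(q);
 *	... ->queue_rq() is guaranteed not to be running ...
 *	blk_mq_unquiesce_queue(q);
 */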

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];
	req_flags_t rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->rq_flags = rq_flags;
	rq->cpu = -1;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time_ns = ktime_get_ns();
	rq->io_start_time_ns = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;
	rq->__deadline = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
#endif

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	refcount_set(&rq->ref, 1);
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool put_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		put_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.mq.limit_depth(op, data);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (put_ctx_on_error) {
			blk_mq_put_ctx(data->ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
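
/*
 * Usage sketch for blk_mq_alloc_request(), hypothetical and not taken from
 * this file: a caller allocates a passthrough request, fills it in, and
 * executes it synchronously before freeing it:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the driver-private payload ...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 */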

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	wbt_done(q->rq_wb, rq);

	if (blk_rq_rl(rq))
		blk_put_rl(blk_rq_rl(rq));

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	u64 now = ktime_get_ns();

	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_account_io_done(rq, now);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, rq);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
			MQ_RQ_IN_FLIGHT)
		return;

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}
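
/*
 * A note on hctx_lock()/hctx_unlock() pairing, mirroring how
 * __blk_mq_run_hw_queue() below uses them: dispatch runs under RCU, or
 * under SRCU for BLK_MQ_F_BLOCKING queues, so blk_mq_quiesce_queue() can
 * wait for it to finish:
 *
 *	int srcu_idx;
 *
 *	hctx_lock(hctx, &srcu_idx);
 *	blk_mq_sched_dispatch_requests(hctx);
 *	hctx_unlock(hctx, srcu_idx);
 */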

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (unlikely(blk_should_fake_timeout(rq->q)))
		return;
	__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		rq->throtl_size = blk_rq_sectors(rq);
#endif
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;

	deadline = blk_rq_deadline(rq);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * Just do a quick check if it is expired before locking the request,
	 * so we're not unnecessarily synchronizing across CPUs.
	 */
	if (!blk_mq_req_expired(rq, next))
		return;

	/*
	 * We have reason to believe the request may be expired. Take a
	 * reference on the request to lock this request lifetime into its
	 * currently allocated context to prevent it from being reallocated in
	 * the event the completion by-passes this timeout handler.
	 *
	 * If the reference was already released, then the driver beat the
	 * timeout handler to posting a natural completion.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return;

	/*
	 * The request is now locked and cannot be reallocated underneath the
	 * timeout handler's processing. Re-verify this exact request is truly
	 * expired; if it is not expired, then the request was completed and
	 * reallocated as a new request.
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_list))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
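
/*
 * An inferred note, not part of the original file: queued_to_index() maps a
 * dispatch batch size onto a log2 histogram slot for hctx->dispatched[].
 * E.g. queued = 5 lands in min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(5) + 1)
 * = 3, assuming BLK_MQ_MAX_DISPATCH_ORDER is 7 as in kernels of this era.
 */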

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del_init(&wait->entry);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
				 struct request *rq)
{
	struct blk_mq_hw_ctx *this_hctx = *hctx;
	struct sbq_wait_state *ws;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq, hctx, false);
	}

	wait = &this_hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	spin_lock(&this_hctx->lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&this_hctx->lock);
		return false;
	}

	ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
	add_wait_queue(&ws->wait, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq, hctx, false);
	if (!ret) {
		spin_unlock(&this_hctx->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	spin_lock_irq(&ws->wait.lock);
	list_del_init(&wait->entry);
	spin_unlock_irq(&ws->wait.lock);
	spin_unlock(&this_hctx->lock);

	return true;
}

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
			break;

		if (!blk_mq_get_driver_tag(rq, NULL, false)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
				blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it
			 * again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && (ret == BLK_STS_RESOURCE))
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
	}

	return (queued + errors) != 0;
}

Bart Van Assche6a83e742016-11-02 10:09:51 -06001209static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1210{
1211 int srcu_idx;
1212
Jens Axboeb7a71e62017-08-01 09:28:24 -06001213 /*
1214 * We should be running this queue from one of the CPUs that
1215 * are mapped to it.
Ming Lei7df938f2018-01-18 00:41:52 +08001216 *
1217 * There are at least two related races now between setting
1218 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1219 * __blk_mq_run_hw_queue():
1220 *
1221 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1222	 * but it later becomes online; in that case this warning is
1223	 * simply harmless
1224	 *
1225	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1226	 * but it later becomes offline; then the warning can't be
1227	 * triggered, and we depend on the blk-mq timeout handler to
1228	 * deal with the requests dispatched to this hctx
Jens Axboeb7a71e62017-08-01 09:28:24 -06001229 */
Ming Lei7df938f2018-01-18 00:41:52 +08001230 if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1231 cpu_online(hctx->next_cpu)) {
1232 printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1233 raw_smp_processor_id(),
1234 cpumask_empty(hctx->cpumask) ? "inactive": "active");
1235 dump_stack();
1236 }
Bart Van Assche6a83e742016-11-02 10:09:51 -06001237
Jens Axboeb7a71e62017-08-01 09:28:24 -06001238 /*
1239	 * We can't run the queue inline with interrupts disabled. Ensure that
1240 * we catch bad users of this early.
1241 */
1242 WARN_ON_ONCE(in_interrupt());
1243
Jens Axboe04ced152018-01-09 08:29:46 -08001244 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
Jens Axboebf4907c2017-03-30 12:30:39 -06001245
Jens Axboe04ced152018-01-09 08:29:46 -08001246 hctx_lock(hctx, &srcu_idx);
1247 blk_mq_sched_dispatch_requests(hctx);
1248 hctx_unlock(hctx, srcu_idx);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001249}
1250
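/*
 * Pick the first CPU in hctx->cpumask that is currently online; if none
 * are online, fall back to the first possible CPU in the mask.
 */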
Ming Leif82ddf12018-04-08 17:48:10 +08001251static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1252{
1253 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1254
1255 if (cpu >= nr_cpu_ids)
1256 cpu = cpumask_first(hctx->cpumask);
1257 return cpu;
1258}
1259
Jens Axboe506e9312014-05-07 10:26:44 -06001260/*
1261 * It'd be great if the workqueue API had a way to pass
1262 * in a mask and had some smarts for more clever placement.
1263 * For now we just round-robin here, switching for every
1264 * BLK_MQ_CPU_WORK_BATCH queued items.
1265 */
1266static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1267{
Ming Lei7bed4592018-01-18 00:41:51 +08001268 bool tried = false;
Ming Lei476f8c92018-04-08 17:48:09 +08001269 int next_cpu = hctx->next_cpu;
Ming Lei7bed4592018-01-18 00:41:51 +08001270
Christoph Hellwigb657d7e2014-11-24 09:27:23 +01001271 if (hctx->queue->nr_hw_queues == 1)
1272 return WORK_CPU_UNBOUND;
Jens Axboe506e9312014-05-07 10:26:44 -06001273
1274 if (--hctx->next_cpu_batch <= 0) {
Ming Lei7bed4592018-01-18 00:41:51 +08001275select_cpu:
Ming Lei476f8c92018-04-08 17:48:09 +08001276 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
Christoph Hellwig20e4d8132018-01-12 10:53:06 +08001277 cpu_online_mask);
Jens Axboe506e9312014-05-07 10:26:44 -06001278 if (next_cpu >= nr_cpu_ids)
Ming Leif82ddf12018-04-08 17:48:10 +08001279 next_cpu = blk_mq_first_mapped_cpu(hctx);
Jens Axboe506e9312014-05-07 10:26:44 -06001280 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1281 }
1282
Ming Lei7bed4592018-01-18 00:41:51 +08001283 /*
1284	 * Fall back to an unbound schedule if we can't find an online CPU
1285	 * for this hctx; this should only happen while handling CPU DEAD.
1286 */
Ming Lei476f8c92018-04-08 17:48:09 +08001287 if (!cpu_online(next_cpu)) {
Ming Lei7bed4592018-01-18 00:41:51 +08001288 if (!tried) {
1289 tried = true;
1290 goto select_cpu;
1291 }
1292
1293 /*
1294		 * Make sure to re-select the CPU next time once CPUs in
1295		 * hctx->cpumask become online again.
1296 */
Ming Lei476f8c92018-04-08 17:48:09 +08001297 hctx->next_cpu = next_cpu;
Ming Lei7bed4592018-01-18 00:41:51 +08001298 hctx->next_cpu_batch = 1;
1299 return WORK_CPU_UNBOUND;
1300 }
Ming Lei476f8c92018-04-08 17:48:09 +08001301
1302 hctx->next_cpu = next_cpu;
1303 return next_cpu;
Jens Axboe506e9312014-05-07 10:26:44 -06001304}
1305
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001306static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1307 unsigned long msecs)
Jens Axboe320ae512013-10-24 09:20:05 +01001308{
Bart Van Assche5435c022017-06-20 11:15:49 -07001309 if (unlikely(blk_mq_hctx_stopped(hctx)))
Jens Axboe320ae512013-10-24 09:20:05 +01001310 return;
1311
Jens Axboe1b792f22016-09-21 10:12:13 -06001312 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001313 int cpu = get_cpu();
1314 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
Paolo Bonzini398205b2014-11-07 23:03:59 +01001315 __blk_mq_run_hw_queue(hctx);
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001316 put_cpu();
Paolo Bonzini398205b2014-11-07 23:03:59 +01001317 return;
1318 }
Jens Axboee4043dc2014-04-09 10:18:23 -06001319
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001320 put_cpu();
Jens Axboee4043dc2014-04-09 10:18:23 -06001321 }
Paolo Bonzini398205b2014-11-07 23:03:59 +01001322
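	/*
	 * kblockd_mod_delayed_work_on() has mod_delayed_work() semantics,
	 * so a later call with a shorter delay can pull an already queued
	 * run forward instead of being ignored.
	 */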
Bart Van Asscheae943d22018-01-19 08:58:55 -08001323 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1324 msecs_to_jiffies(msecs));
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001325}
1326
1327void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1328{
1329 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1330}
1331EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
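/*
 * Example (hypothetical driver code, a sketch only): a driver that knows
 * its device needs roughly 3ms to drain before another submission can
 * succeed might end its ->queue_rq() with
 *
 *	blk_mq_delay_run_hw_queue(hctx, 3);
 *	return BLK_STS_DEV_RESOURCE;
 *
 * the delayed run supplies the rerun that BLK_STS_DEV_RESOURCE promises,
 * even with no I/O in flight to trigger one.
 */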
1332
Jens Axboe79f720a2017-11-10 09:13:21 -07001333bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001334{
Ming Lei24f5a902018-01-06 16:27:38 +08001335 int srcu_idx;
1336 bool need_run;
1337
1338 /*
1339	 * When the queue is quiesced, we may be switching the io scheduler,
1340	 * updating nr_hw_queues, or similar; the queue can't be run any more,
1341	 * and even __blk_mq_hctx_has_pending() can't be called safely.
1342	 *
1343	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
1344	 * quiesced.
1345 */
Jens Axboe04ced152018-01-09 08:29:46 -08001346 hctx_lock(hctx, &srcu_idx);
1347 need_run = !blk_queue_quiesced(hctx->queue) &&
1348 blk_mq_hctx_has_pending(hctx);
1349 hctx_unlock(hctx, srcu_idx);
Ming Lei24f5a902018-01-06 16:27:38 +08001350
1351 if (need_run) {
Jens Axboe79f720a2017-11-10 09:13:21 -07001352 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1353 return true;
1354 }
1355
1356 return false;
Jens Axboe320ae512013-10-24 09:20:05 +01001357}
Omar Sandoval5b727272017-04-14 01:00:00 -07001358EXPORT_SYMBOL(blk_mq_run_hw_queue);
Jens Axboe320ae512013-10-24 09:20:05 +01001359
Mike Snitzerb94ec292015-03-11 23:56:38 -04001360void blk_mq_run_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001361{
1362 struct blk_mq_hw_ctx *hctx;
1363 int i;
1364
1365 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe79f720a2017-11-10 09:13:21 -07001366 if (blk_mq_hctx_stopped(hctx))
Jens Axboe320ae512013-10-24 09:20:05 +01001367 continue;
1368
Mike Snitzerb94ec292015-03-11 23:56:38 -04001369 blk_mq_run_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001370 }
1371}
Mike Snitzerb94ec292015-03-11 23:56:38 -04001372EXPORT_SYMBOL(blk_mq_run_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01001373
Bart Van Asschefd001442016-10-28 17:19:37 -07001374/**
1375 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1376 * @q: request queue.
1377 *
1378 * The caller is responsible for serializing this function against
1379 * blk_mq_{start,stop}_hw_queue().
1380 */
1381bool blk_mq_queue_stopped(struct request_queue *q)
1382{
1383 struct blk_mq_hw_ctx *hctx;
1384 int i;
1385
1386 queue_for_each_hw_ctx(q, hctx, i)
1387 if (blk_mq_hctx_stopped(hctx))
1388 return true;
1389
1390 return false;
1391}
1392EXPORT_SYMBOL(blk_mq_queue_stopped);
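/*
 * Example (hypothetical, a sketch only): a stacking driver that wants to
 * kick a lower-level queue only if something actually stopped it might do
 *
 *	if (blk_mq_queue_stopped(q))
 *		blk_mq_start_stopped_hw_queues(q, true);
 */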
1393
Ming Lei39a70c72017-06-06 23:22:09 +08001394/*
1395 * This function is often used by a driver to pause .queue_rq() when
1396 * there aren't enough resources or some condition isn't satisfied, in
Bart Van Assche4d606212017-08-17 16:23:00 -07001397 * which case BLK_STS_RESOURCE is usually returned.
Ming Lei39a70c72017-06-06 23:22:09 +08001398 *
1399 * We do not guarantee that dispatch can be drained or blocked
1400 * after blk_mq_stop_hw_queue() returns. Please use
1401 * blk_mq_quiesce_queue() for that requirement.
1402 */
Jens Axboe320ae512013-10-24 09:20:05 +01001403void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1404{
Ming Lei641a9ed2017-06-06 23:22:10 +08001405 cancel_delayed_work(&hctx->run_work);
1406
1407 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboe320ae512013-10-24 09:20:05 +01001408}
1409EXPORT_SYMBOL(blk_mq_stop_hw_queue);
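/*
 * Example (hypothetical driver code, a sketch only): a ->queue_rq() that
 * runs out of device resources and restarts the queue from its completion
 * handler might do
 *
 *	if (out_of_device_resources(dev)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_DEV_RESOURCE;
 *	}
 *
 * with a matching blk_mq_start_stopped_hw_queues() on the completion path.
 */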
1410
Ming Lei39a70c72017-06-06 23:22:09 +08001411/*
1412 * This function is often used by a driver to pause .queue_rq() when
1413 * there aren't enough resources or some condition isn't satisfied, in
Bart Van Assche4d606212017-08-17 16:23:00 -07001414 * which case BLK_STS_RESOURCE is usually returned.
Ming Lei39a70c72017-06-06 23:22:09 +08001415 *
1416 * We do not guarantee that dispatch can be drained or blocked
1417 * after blk_mq_stop_hw_queues() returns. Please use
1418 * blk_mq_quiesce_queue() for that requirement.
1419 */
Jens Axboe2719aa22017-05-03 11:08:14 -06001420void blk_mq_stop_hw_queues(struct request_queue *q)
1421{
Ming Lei641a9ed2017-06-06 23:22:10 +08001422 struct blk_mq_hw_ctx *hctx;
1423 int i;
1424
1425 queue_for_each_hw_ctx(q, hctx, i)
1426 blk_mq_stop_hw_queue(hctx);
Christoph Hellwig280d45f2013-10-25 14:45:58 +01001427}
1428EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1429
Jens Axboe320ae512013-10-24 09:20:05 +01001430void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1431{
1432 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboee4043dc2014-04-09 10:18:23 -06001433
Jens Axboe0ffbce82014-06-25 08:22:34 -06001434 blk_mq_run_hw_queue(hctx, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001435}
1436EXPORT_SYMBOL(blk_mq_start_hw_queue);
1437
Christoph Hellwig2f268552014-04-16 09:44:56 +02001438void blk_mq_start_hw_queues(struct request_queue *q)
1439{
1440 struct blk_mq_hw_ctx *hctx;
1441 int i;
1442
1443 queue_for_each_hw_ctx(q, hctx, i)
1444 blk_mq_start_hw_queue(hctx);
1445}
1446EXPORT_SYMBOL(blk_mq_start_hw_queues);
1447
Jens Axboeae911c52016-12-08 13:19:30 -07001448void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1449{
1450 if (!blk_mq_hctx_stopped(hctx))
1451 return;
1452
1453 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1454 blk_mq_run_hw_queue(hctx, async);
1455}
1456EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1457
Christoph Hellwig1b4a3252014-04-16 09:44:54 +02001458void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001459{
1460 struct blk_mq_hw_ctx *hctx;
1461 int i;
1462
Jens Axboeae911c52016-12-08 13:19:30 -07001463 queue_for_each_hw_ctx(q, hctx, i)
1464 blk_mq_start_stopped_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001465}
1466EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1467
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001468static void blk_mq_run_work_fn(struct work_struct *work)
Jens Axboe320ae512013-10-24 09:20:05 +01001469{
1470 struct blk_mq_hw_ctx *hctx;
1471
Jens Axboe9f993732017-04-10 09:54:54 -06001472 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
Jens Axboe21c6e932017-04-10 09:54:56 -06001473
1474 /*
Ming Lei15fe8a902018-04-08 17:48:11 +08001475 * If we are stopped, don't run the queue.
Jens Axboe21c6e932017-04-10 09:54:56 -06001476 */
Ming Lei15fe8a902018-04-08 17:48:11 +08001477 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
Jianchao Wang0196d6b2018-06-04 17:03:55 +08001478 return;
Jens Axboee4043dc2014-04-09 10:18:23 -06001479
Jens Axboe320ae512013-10-24 09:20:05 +01001480 __blk_mq_run_hw_queue(hctx);
1481}
1482
Ming Leicfd0c552015-10-20 23:13:57 +08001483static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
Ming Leicfd0c552015-10-20 23:13:57 +08001484 struct request *rq,
1485 bool at_head)
Jens Axboe320ae512013-10-24 09:20:05 +01001486{
Jens Axboee57690f2016-08-24 15:34:35 -06001487 struct blk_mq_ctx *ctx = rq->mq_ctx;
1488
Bart Van Assche7b607812017-06-20 11:15:47 -07001489 lockdep_assert_held(&ctx->lock);
1490
Jens Axboe01b983c2013-11-19 18:59:10 -07001491 trace_block_rq_insert(hctx->queue, rq);
1492
Christoph Hellwig72a0a362014-02-07 10:22:36 -08001493 if (at_head)
1494 list_add(&rq->queuelist, &ctx->rq_list);
1495 else
1496 list_add_tail(&rq->queuelist, &ctx->rq_list);
Ming Leicfd0c552015-10-20 23:13:57 +08001497}
Jens Axboe4bb659b2014-05-09 09:36:49 -06001498
Jens Axboe2c3ad662016-12-14 14:34:47 -07001499void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1500 bool at_head)
Ming Leicfd0c552015-10-20 23:13:57 +08001501{
1502 struct blk_mq_ctx *ctx = rq->mq_ctx;
1503
Bart Van Assche7b607812017-06-20 11:15:47 -07001504 lockdep_assert_held(&ctx->lock);
1505
Jens Axboee57690f2016-08-24 15:34:35 -06001506 __blk_mq_insert_req_list(hctx, rq, at_head);
Jens Axboe320ae512013-10-24 09:20:05 +01001507 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001508}
1509
Jens Axboe157f3772017-09-11 16:43:57 -06001510/*
1511 * Should only be used carefully, when the caller knows we want to
1512 * bypass a potential IO scheduler on the target device.
1513 */
Ming Leib0850292017-11-02 23:24:34 +08001514void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
Jens Axboe157f3772017-09-11 16:43:57 -06001515{
1516 struct blk_mq_ctx *ctx = rq->mq_ctx;
1517 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1518
1519 spin_lock(&hctx->lock);
1520 list_add_tail(&rq->queuelist, &hctx->dispatch);
1521 spin_unlock(&hctx->lock);
1522
Ming Leib0850292017-11-02 23:24:34 +08001523 if (run_queue)
1524 blk_mq_run_hw_queue(hctx, false);
Jens Axboe157f3772017-09-11 16:43:57 -06001525}
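/*
 * This is what, for example, blk_insert_cloned_request() relies on for
 * dm-rq: the clone already went through the top-level scheduler, so the
 * lower device's scheduler must not see it again.
 */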
1526
Jens Axboebd166ef2017-01-17 06:03:22 -07001527void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1528 struct list_head *list)
Jens Axboe320ae512013-10-24 09:20:05 +01001529{
Jens Axboe320ae512013-10-24 09:20:05 +01001531 /*
1532	 * Preemption doesn't flush the plug list, so it's possible that
1533	 * ctx->cpu is offline by now.
1534 */
1535 spin_lock(&ctx->lock);
1536 while (!list_empty(list)) {
1537 struct request *rq;
1538
1539 rq = list_first_entry(list, struct request, queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001540 BUG_ON(rq->mq_ctx != ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001541 list_del_init(&rq->queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001542 __blk_mq_insert_req_list(hctx, rq, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001543 }
Ming Leicfd0c552015-10-20 23:13:57 +08001544 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001545 spin_unlock(&ctx->lock);
Jens Axboe320ae512013-10-24 09:20:05 +01001546}
1547
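/*
 * Comparator for list_sort(): order plugged requests by software queue
 * first and by start sector within a queue, so requests for the same ctx
 * end up adjacent and can be inserted in one batch below.
 */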
1548static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1549{
1550 struct request *rqa = container_of(a, struct request, queuelist);
1551 struct request *rqb = container_of(b, struct request, queuelist);
1552
1553 return !(rqa->mq_ctx < rqb->mq_ctx ||
1554 (rqa->mq_ctx == rqb->mq_ctx &&
1555 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1556}
1557
1558void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1559{
1560 struct blk_mq_ctx *this_ctx;
1561 struct request_queue *this_q;
1562 struct request *rq;
1563 LIST_HEAD(list);
1564 LIST_HEAD(ctx_list);
1565 unsigned int depth;
1566
1567 list_splice_init(&plug->mq_list, &list);
1568
1569 list_sort(NULL, &list, plug_ctx_cmp);
1570
1571 this_q = NULL;
1572 this_ctx = NULL;
1573 depth = 0;
1574
1575 while (!list_empty(&list)) {
1576 rq = list_entry_rq(list.next);
1577 list_del_init(&rq->queuelist);
1578 BUG_ON(!rq->q);
1579 if (rq->mq_ctx != this_ctx) {
1580 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001581 trace_block_unplug(this_q, depth, from_schedule);
1582 blk_mq_sched_insert_requests(this_q, this_ctx,
1583 &ctx_list,
1584 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001585 }
1586
1587 this_ctx = rq->mq_ctx;
1588 this_q = rq->q;
1589 depth = 0;
1590 }
1591
1592 depth++;
1593 list_add_tail(&rq->queuelist, &ctx_list);
1594 }
1595
1596 /*
1597 * If 'this_ctx' is set, we know we have entries to complete
1598 * on 'ctx_list'. Do those.
1599 */
1600 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001601 trace_block_unplug(this_q, depth, from_schedule);
1602 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1603 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001604 }
1605}
1606
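/*
 * Fill a freshly allocated request from the bio, point it at the right
 * blkcg-aware request_list for accounting, and start I/O accounting.
 */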
1607static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1608{
Bart Van Asscheda8d7f02017-04-19 14:01:24 -07001609 blk_init_request_from_bio(rq, bio);
Jens Axboe4b570522014-05-29 11:00:11 -06001610
Shaohua Li85acb3b2017-10-06 17:56:00 -07001611 blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
1612
Jens Axboe6e85eaf2016-12-02 20:00:14 -07001613 blk_account_io_start(rq, true);
Jens Axboe320ae512013-10-24 09:20:05 +01001614}
1615
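/*
 * Build the polling cookie for a request: the (driver or scheduler
 * internal) tag plus the hardware queue number, with a flag recording
 * which tag space the cookie refers to.
 */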
Jens Axboefd2d3322017-01-12 10:04:45 -07001616static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1617{
Jens Axboebd166ef2017-01-17 06:03:22 -07001618 if (rq->tag != -1)
1619 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1620
1621 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
Jens Axboefd2d3322017-01-12 10:04:45 -07001622}
1623
Mike Snitzer0f955492018-01-17 11:25:56 -05001624static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1625 struct request *rq,
1626 blk_qc_t *cookie)
Shaohua Lif984df12015-05-08 10:51:32 -07001627{
Shaohua Lif984df12015-05-08 10:51:32 -07001628 struct request_queue *q = rq->q;
Shaohua Lif984df12015-05-08 10:51:32 -07001629 struct blk_mq_queue_data bd = {
1630 .rq = rq,
Omar Sandovald945a362017-04-05 12:01:36 -07001631 .last = true,
Shaohua Lif984df12015-05-08 10:51:32 -07001632 };
Jens Axboebd166ef2017-01-17 06:03:22 -07001633 blk_qc_t new_cookie;
Jens Axboef06345a2017-06-12 11:22:46 -06001634 blk_status_t ret;
Mike Snitzer0f955492018-01-17 11:25:56 -05001635
1636 new_cookie = request_to_qc_t(hctx, rq);
1637
1638 /*
1639	 * If the queue returns OK, we are done. On a hard error the caller
1640	 * may kill the request. Any other return (busy) just requeues the
1641	 * request, as we previously would have done.
1642 */
1643 ret = q->mq_ops->queue_rq(hctx, &bd);
1644 switch (ret) {
1645 case BLK_STS_OK:
1646 *cookie = new_cookie;
1647 break;
1648 case BLK_STS_RESOURCE:
Ming Lei86ff7c22018-01-30 22:04:57 -05001649 case BLK_STS_DEV_RESOURCE:
Mike Snitzer0f955492018-01-17 11:25:56 -05001650 __blk_mq_requeue_request(rq);
1651 break;
1652 default:
1653 *cookie = BLK_QC_T_NONE;
1654 break;
1655 }
1656
1657 return ret;
1658}
1659
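/*
 * Attempt to dispatch the request straight to the driver. The direct path
 * is abandoned when the queue is stopped or quiesced, when an elevator is
 * attached (unless we are explicitly bypassing it), or when no dispatch
 * budget or driver tag can be obtained; in those cases the request is
 * inserted for a normal queue run instead.
 */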
Mike Snitzer0f955492018-01-17 11:25:56 -05001660static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1661 struct request *rq,
Ming Lei396eaf22018-01-17 11:25:57 -05001662 blk_qc_t *cookie,
1663 bool bypass_insert)
Mike Snitzer0f955492018-01-17 11:25:56 -05001664{
1665 struct request_queue *q = rq->q;
Ming Leid964f042017-06-06 23:22:00 +08001666 bool run_queue = true;
1667
Ming Lei23d4ee12018-01-18 12:06:59 +08001668 /*
1669 * RCU or SRCU read lock is needed before checking quiesced flag.
1670 *
1671	 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
Bart Van Asschec77ff7f2018-01-19 08:58:54 -08001672	 * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller,
Ming Lei23d4ee12018-01-18 12:06:59 +08001673	 * so the driver is not asked to dispatch again.
1674 */
Ming Leif4560ff2017-06-18 14:24:27 -06001675 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
Ming Leid964f042017-06-06 23:22:00 +08001676 run_queue = false;
Ming Lei23d4ee12018-01-18 12:06:59 +08001677 bypass_insert = false;
Ming Leid964f042017-06-06 23:22:00 +08001678 goto insert;
1679 }
Shaohua Lif984df12015-05-08 10:51:32 -07001680
Ming Lei396eaf22018-01-17 11:25:57 -05001681 if (q->elevator && !bypass_insert)
Bart Van Assche2253efc2016-10-28 17:20:02 -07001682 goto insert;
1683
Ming Lei0bca7992018-04-05 00:35:21 +08001684 if (!blk_mq_get_dispatch_budget(hctx))
Jens Axboebd166ef2017-01-17 06:03:22 -07001685 goto insert;
1686
Ming Lei0bca7992018-04-05 00:35:21 +08001687 if (!blk_mq_get_driver_tag(rq, NULL, false)) {
1688 blk_mq_put_dispatch_budget(hctx);
Ming Leide148292017-10-14 17:22:29 +08001689 goto insert;
Ming Lei88022d72017-11-05 02:21:12 +08001690 }
Ming Leide148292017-10-14 17:22:29 +08001691
Mike Snitzer0f955492018-01-17 11:25:56 -05001692 return __blk_mq_issue_directly(hctx, rq, cookie);
Bart Van Assche2253efc2016-10-28 17:20:02 -07001693insert:
Ming Lei396eaf22018-01-17 11:25:57 -05001694 if (bypass_insert)
1695 return BLK_STS_RESOURCE;
Mike Snitzer0f955492018-01-17 11:25:56 -05001696
Ming Lei23d4ee12018-01-18 12:06:59 +08001697 blk_mq_sched_insert_request(rq, false, run_queue, false);
Mike Snitzer0f955492018-01-17 11:25:56 -05001698 return BLK_STS_OK;
Shaohua Lif984df12015-05-08 10:51:32 -07001699}
1700
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001701static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1702 struct request *rq, blk_qc_t *cookie)
1703{
Mike Snitzer0f955492018-01-17 11:25:56 -05001704 blk_status_t ret;
Jens Axboe04ced152018-01-09 08:29:46 -08001705 int srcu_idx;
Jens Axboebf4907c2017-03-30 12:30:39 -06001706
Jens Axboe04ced152018-01-09 08:29:46 -08001707 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
Jens Axboebf4907c2017-03-30 12:30:39 -06001708
Jens Axboe04ced152018-01-09 08:29:46 -08001709 hctx_lock(hctx, &srcu_idx);
Mike Snitzer0f955492018-01-17 11:25:56 -05001710
Ming Lei396eaf22018-01-17 11:25:57 -05001711 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
Ming Lei86ff7c22018-01-30 22:04:57 -05001712 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
Ming Lei23d4ee12018-01-18 12:06:59 +08001713 blk_mq_sched_insert_request(rq, false, true, false);
Mike Snitzer0f955492018-01-17 11:25:56 -05001714 else if (ret != BLK_STS_OK)
1715 blk_mq_end_request(rq, ret);
1716
Jens Axboe04ced152018-01-09 08:29:46 -08001717 hctx_unlock(hctx, srcu_idx);
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001718}
1719
Bart Van Asschec77ff7f2018-01-19 08:58:54 -08001720blk_status_t blk_mq_request_issue_directly(struct request *rq)
Ming Lei396eaf22018-01-17 11:25:57 -05001721{
1722 blk_status_t ret;
1723 int srcu_idx;
1724 blk_qc_t unused_cookie;
1725 struct blk_mq_ctx *ctx = rq->mq_ctx;
1726 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1727
1728 hctx_lock(hctx, &srcu_idx);
1729 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
1730 hctx_unlock(hctx, srcu_idx);
1731
1732 return ret;
Jens Axboe07068d52014-05-22 10:40:51 -06001733}
1734
Jens Axboedece1632015-11-05 10:41:16 -07001735static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06001736{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001737 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07001738 const int is_flush_fua = op_is_flush(bio->bi_opf);
Jens Axboe5a797e02017-01-26 12:22:11 -07001739 struct blk_mq_alloc_data data = { .flags = 0 };
Jens Axboe07068d52014-05-22 10:40:51 -06001740 struct request *rq;
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001741 unsigned int request_count = 0;
Shaohua Lif984df12015-05-08 10:51:32 -07001742 struct blk_plug *plug;
Shaohua Li5b3f3412015-05-08 10:51:33 -07001743 struct request *same_queue_rq = NULL;
Jens Axboe7b371632015-11-05 10:41:40 -07001744 blk_qc_t cookie;
Jens Axboe87760e52016-11-09 12:38:14 -07001745 unsigned int wb_acct;
Jens Axboe07068d52014-05-22 10:40:51 -06001746
1747 blk_queue_bounce(q, &bio);
1748
NeilBrownaf67c312017-06-18 14:38:57 +10001749 blk_queue_split(q, &bio);
Wen Xiongf36ea502017-05-10 08:54:11 -05001750
Dmitry Monakhove23947b2017-06-29 11:31:11 -07001751 if (!bio_integrity_prep(bio))
Jens Axboedece1632015-11-05 10:41:16 -07001752 return BLK_QC_T_NONE;
Jens Axboe07068d52014-05-22 10:40:51 -06001753
Omar Sandoval87c279e2016-06-01 22:18:48 -07001754 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1755 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1756 return BLK_QC_T_NONE;
Shaohua Lif984df12015-05-08 10:51:32 -07001757
Jens Axboebd166ef2017-01-17 06:03:22 -07001758 if (blk_mq_sched_bio_merge(q, bio))
1759 return BLK_QC_T_NONE;
1760
Jens Axboe87760e52016-11-09 12:38:14 -07001761 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1762
Jens Axboebd166ef2017-01-17 06:03:22 -07001763 trace_block_getrq(q, bio, bio->bi_opf);
1764
Christoph Hellwigd2c0d382017-06-16 18:15:19 +02001765 rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
Jens Axboe87760e52016-11-09 12:38:14 -07001766 if (unlikely(!rq)) {
1767 __wbt_done(q->rq_wb, wb_acct);
Goldwyn Rodrigues03a07c92017-06-20 07:05:46 -05001768 if (bio->bi_opf & REQ_NOWAIT)
1769 bio_wouldblock_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001770 return BLK_QC_T_NONE;
Jens Axboe87760e52016-11-09 12:38:14 -07001771 }
1772
Omar Sandovala8a45942018-05-09 02:08:48 -07001773 wbt_track(rq, wb_acct);
Jens Axboe07068d52014-05-22 10:40:51 -06001774
Jens Axboefd2d3322017-01-12 10:04:45 -07001775 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06001776
Shaohua Lif984df12015-05-08 10:51:32 -07001777 plug = current->plug;
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001778 if (unlikely(is_flush_fua)) {
Shaohua Lif984df12015-05-08 10:51:32 -07001779 blk_mq_put_ctx(data.ctx);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001780 blk_mq_bio_to_request(rq, bio);
Ming Lei923218f2017-11-02 23:24:38 +08001781
1782 /* bypass scheduler for flush rq */
1783 blk_insert_flush(rq);
1784 blk_mq_run_hw_queue(data.hctx, true);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001785 } else if (plug && q->nr_hw_queues == 1) {
Shaohua Li600271d2016-11-03 17:03:54 -07001786 struct request *last = NULL;
1787
Jens Axboeb00c53e2017-04-20 16:40:36 -06001788 blk_mq_put_ctx(data.ctx);
Jeff Moyere6c44382015-05-08 10:51:30 -07001789 blk_mq_bio_to_request(rq, bio);
Ming Lei0a6219a2016-11-16 18:07:05 +08001790
1791 /*
1792		 * @request_count may have become stale because we were
1793		 * scheduled out, so check the list again.
1794 */
1795 if (list_empty(&plug->mq_list))
1796 request_count = 0;
Christoph Hellwig254d2592017-03-22 15:01:50 -04001797 else if (blk_queue_nomerges(q))
1798 request_count = blk_plug_queued_count(q);
1799
Ming Lei676d0602015-10-20 23:13:56 +08001800 if (!request_count)
Jeff Moyere6c44382015-05-08 10:51:30 -07001801 trace_block_plug(q);
Shaohua Li600271d2016-11-03 17:03:54 -07001802 else
1803 last = list_entry_rq(plug->mq_list.prev);
Jens Axboeb094f892015-11-20 20:29:45 -07001804
Shaohua Li600271d2016-11-03 17:03:54 -07001805 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1806 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
Jeff Moyere6c44382015-05-08 10:51:30 -07001807 blk_flush_plug_list(plug, false);
1808 trace_block_plug(q);
Jens Axboe320ae512013-10-24 09:20:05 +01001809 }
Jens Axboeb094f892015-11-20 20:29:45 -07001810
Jeff Moyere6c44382015-05-08 10:51:30 -07001811 list_add_tail(&rq->queuelist, &plug->mq_list);
Christoph Hellwig22997222017-03-22 15:01:52 -04001812 } else if (plug && !blk_queue_nomerges(q)) {
Jens Axboe320ae512013-10-24 09:20:05 +01001813 blk_mq_bio_to_request(rq, bio);
Jens Axboe320ae512013-10-24 09:20:05 +01001814
Jens Axboe320ae512013-10-24 09:20:05 +01001815 /*
1816 * We do limited plugging. If the bio can be merged, do that.
1817 * Otherwise the existing request in the plug list will be
1818		 * issued. So the plug list will have one request at most.
Christoph Hellwig22997222017-03-22 15:01:52 -04001819 * The plug list might get flushed before this. If that happens,
1820 * the plug list is empty, and same_queue_rq is invalid.
Jens Axboe320ae512013-10-24 09:20:05 +01001821 */
Christoph Hellwig22997222017-03-22 15:01:52 -04001822 if (list_empty(&plug->mq_list))
1823 same_queue_rq = NULL;
1824 if (same_queue_rq)
1825 list_del_init(&same_queue_rq->queuelist);
1826 list_add_tail(&rq->queuelist, &plug->mq_list);
1827
Jens Axboebf4907c2017-03-30 12:30:39 -06001828 blk_mq_put_ctx(data.ctx);
1829
Ming Leidad7a3b2017-06-06 23:21:59 +08001830 if (same_queue_rq) {
1831 data.hctx = blk_mq_map_queue(q,
1832 same_queue_rq->mq_ctx->cpu);
Christoph Hellwig22997222017-03-22 15:01:52 -04001833 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1834 &cookie);
Ming Leidad7a3b2017-06-06 23:21:59 +08001835 }
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001836 } else if (q->nr_hw_queues > 1 && is_sync) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001837 blk_mq_put_ctx(data.ctx);
1838 blk_mq_bio_to_request(rq, bio);
Christoph Hellwig22997222017-03-22 15:01:52 -04001839 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
Ming Leiab42f352017-05-26 19:53:19 +08001840 } else {
Jens Axboeb00c53e2017-04-20 16:40:36 -06001841 blk_mq_put_ctx(data.ctx);
Ming Leiab42f352017-05-26 19:53:19 +08001842 blk_mq_bio_to_request(rq, bio);
huhai8fa9f552018-05-16 08:21:21 -06001843 blk_mq_sched_insert_request(rq, false, true, true);
Ming Leiab42f352017-05-26 19:53:19 +08001844 }
Jens Axboe320ae512013-10-24 09:20:05 +01001845
Jens Axboe7b371632015-11-05 10:41:40 -07001846 return cookie;
Jens Axboe320ae512013-10-24 09:20:05 +01001847}
1848
Jens Axboecc71a6f2017-01-11 14:29:56 -07001849void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1850 unsigned int hctx_idx)
Jens Axboe320ae512013-10-24 09:20:05 +01001851{
1852 struct page *page;
1853
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001854 if (tags->rqs && set->ops->exit_request) {
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001855 int i;
1856
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001857 for (i = 0; i < tags->nr_tags; i++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001858 struct request *rq = tags->static_rqs[i];
1859
1860 if (!rq)
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001861 continue;
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001862 set->ops->exit_request(set, rq, hctx_idx);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001863 tags->static_rqs[i] = NULL;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001864 }
1865 }
1866
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001867 while (!list_empty(&tags->page_list)) {
1868 page = list_first_entry(&tags->page_list, struct page, lru);
Dave Hansen67534712014-01-08 20:17:46 -07001869 list_del_init(&page->lru);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001870 /*
1871		 * Remove the kmemleak object previously allocated in
1872		 * blk_mq_alloc_rqs().
1873 */
1874 kmemleak_free(page_address(page));
Jens Axboe320ae512013-10-24 09:20:05 +01001875 __free_pages(page, page->private);
1876 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001877}
Jens Axboe320ae512013-10-24 09:20:05 +01001878
Jens Axboecc71a6f2017-01-11 14:29:56 -07001879void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1880{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001881 kfree(tags->rqs);
Jens Axboecc71a6f2017-01-11 14:29:56 -07001882 tags->rqs = NULL;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001883 kfree(tags->static_rqs);
1884 tags->static_rqs = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001885
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001886 blk_mq_free_tags(tags);
Jens Axboe320ae512013-10-24 09:20:05 +01001887}
1888
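/*
 * Allocate the tags plus the rqs/static_rqs pointer arrays for one
 * hardware queue, on the NUMA node that queue maps to; the requests
 * themselves are allocated later by blk_mq_alloc_rqs().
 */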
Jens Axboecc71a6f2017-01-11 14:29:56 -07001889struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1890 unsigned int hctx_idx,
1891 unsigned int nr_tags,
1892 unsigned int reserved_tags)
Jens Axboe320ae512013-10-24 09:20:05 +01001893{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001894 struct blk_mq_tags *tags;
Shaohua Li59f082e2017-02-01 09:53:14 -08001895 int node;
Jens Axboe320ae512013-10-24 09:20:05 +01001896
Shaohua Li59f082e2017-02-01 09:53:14 -08001897 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1898 if (node == NUMA_NO_NODE)
1899 node = set->numa_node;
1900
1901 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
Shaohua Li24391c02015-01-23 14:18:00 -07001902 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001903 if (!tags)
1904 return NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001905
Kees Cook590b5b72018-06-12 14:04:20 -07001906 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001907 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001908 node);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001909 if (!tags->rqs) {
1910 blk_mq_free_tags(tags);
1911 return NULL;
1912 }
Jens Axboe320ae512013-10-24 09:20:05 +01001913
Kees Cook590b5b72018-06-12 14:04:20 -07001914 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
1915 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1916 node);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001917 if (!tags->static_rqs) {
1918 kfree(tags->rqs);
1919 blk_mq_free_tags(tags);
1920 return NULL;
1921 }
1922
Jens Axboecc71a6f2017-01-11 14:29:56 -07001923 return tags;
1924}
1925
1926static size_t order_to_size(unsigned int order)
1927{
1928 return (size_t)PAGE_SIZE << order;
1929}
1930
Tejun Heo1d9bd512018-01-09 08:29:48 -08001931static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
1932 unsigned int hctx_idx, int node)
1933{
1934 int ret;
1935
1936 if (set->ops->init_request) {
1937 ret = set->ops->init_request(set, rq, hctx_idx, node);
1938 if (ret)
1939 return ret;
1940 }
1941
Keith Busch12f5b932018-05-29 15:52:28 +02001942 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
Tejun Heo1d9bd512018-01-09 08:29:48 -08001943 return 0;
1944}
1945
Jens Axboecc71a6f2017-01-11 14:29:56 -07001946int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1947 unsigned int hctx_idx, unsigned int depth)
1948{
1949 unsigned int i, j, entries_per_page, max_order = 4;
1950 size_t rq_size, left;
Shaohua Li59f082e2017-02-01 09:53:14 -08001951 int node;
1952
1953 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1954 if (node == NUMA_NO_NODE)
1955 node = set->numa_node;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001956
1957 INIT_LIST_HEAD(&tags->page_list);
1958
Jens Axboe320ae512013-10-24 09:20:05 +01001959 /*
1960 * rq_size is the size of the request plus driver payload, rounded
1961 * to the cacheline size
1962 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001963 rq_size = round_up(sizeof(struct request) + set->cmd_size,
Jens Axboe320ae512013-10-24 09:20:05 +01001964 cache_line_size());
Jens Axboecc71a6f2017-01-11 14:29:56 -07001965 left = rq_size * depth;
Jens Axboe320ae512013-10-24 09:20:05 +01001966
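	/*
	 * Carve requests out of pages: try progressively smaller
	 * allocation orders until one succeeds, and give up shrinking
	 * once a single request no longer fits in the page.
	 */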
Jens Axboecc71a6f2017-01-11 14:29:56 -07001967 for (i = 0; i < depth; ) {
Jens Axboe320ae512013-10-24 09:20:05 +01001968 int this_order = max_order;
1969 struct page *page;
1970 int to_do;
1971 void *p;
1972
Bartlomiej Zolnierkiewiczb3a834b2016-05-16 09:54:47 -06001973 while (this_order && left < order_to_size(this_order - 1))
Jens Axboe320ae512013-10-24 09:20:05 +01001974 this_order--;
1975
1976 do {
Shaohua Li59f082e2017-02-01 09:53:14 -08001977 page = alloc_pages_node(node,
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001978 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
Jens Axboea5164402014-09-10 09:02:03 -06001979 this_order);
Jens Axboe320ae512013-10-24 09:20:05 +01001980 if (page)
1981 break;
1982 if (!this_order--)
1983 break;
1984 if (order_to_size(this_order) < rq_size)
1985 break;
1986 } while (1);
1987
1988 if (!page)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001989 goto fail;
Jens Axboe320ae512013-10-24 09:20:05 +01001990
1991 page->private = this_order;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001992 list_add_tail(&page->lru, &tags->page_list);
Jens Axboe320ae512013-10-24 09:20:05 +01001993
1994 p = page_address(page);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001995 /*
1996 * Allow kmemleak to scan these pages as they contain pointers
1997		 * to additional allocations made via ops->init_request().
1998 */
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001999 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
Jens Axboe320ae512013-10-24 09:20:05 +01002000 entries_per_page = order_to_size(this_order) / rq_size;
Jens Axboecc71a6f2017-01-11 14:29:56 -07002001 to_do = min(entries_per_page, depth - i);
Jens Axboe320ae512013-10-24 09:20:05 +01002002 left -= to_do * rq_size;
2003 for (j = 0; j < to_do; j++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07002004 struct request *rq = p;
2005
2006 tags->static_rqs[i] = rq;
Tejun Heo1d9bd512018-01-09 08:29:48 -08002007 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2008 tags->static_rqs[i] = NULL;
2009 goto fail;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06002010 }
2011
Jens Axboe320ae512013-10-24 09:20:05 +01002012 p += rq_size;
2013 i++;
2014 }
2015 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002016 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01002017
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002018fail:
Jens Axboecc71a6f2017-01-11 14:29:56 -07002019 blk_mq_free_rqs(set, tags, hctx_idx);
2020 return -ENOMEM;
Jens Axboe320ae512013-10-24 09:20:05 +01002021}
2022
Jens Axboee57690f2016-08-24 15:34:35 -06002023/*
2024 * 'cpu' is going away. Splice any existing rq_list entries from this
2025 * software queue to the hw queue dispatch list, and ensure that it
2026 * gets run.
2027 */
Thomas Gleixner9467f852016-09-22 08:05:17 -06002028static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
Jens Axboe484b4062014-05-21 14:01:15 -06002029{
Thomas Gleixner9467f852016-09-22 08:05:17 -06002030 struct blk_mq_hw_ctx *hctx;
Jens Axboe484b4062014-05-21 14:01:15 -06002031 struct blk_mq_ctx *ctx;
2032 LIST_HEAD(tmp);
2033
Thomas Gleixner9467f852016-09-22 08:05:17 -06002034 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
Jens Axboee57690f2016-08-24 15:34:35 -06002035 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
Jens Axboe484b4062014-05-21 14:01:15 -06002036
2037 spin_lock(&ctx->lock);
2038 if (!list_empty(&ctx->rq_list)) {
2039 list_splice_init(&ctx->rq_list, &tmp);
2040 blk_mq_hctx_clear_pending(hctx, ctx);
2041 }
2042 spin_unlock(&ctx->lock);
2043
2044 if (list_empty(&tmp))
Thomas Gleixner9467f852016-09-22 08:05:17 -06002045 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06002046
Jens Axboee57690f2016-08-24 15:34:35 -06002047 spin_lock(&hctx->lock);
2048 list_splice_tail_init(&tmp, &hctx->dispatch);
2049 spin_unlock(&hctx->lock);
Jens Axboe484b4062014-05-21 14:01:15 -06002050
2051 blk_mq_run_hw_queue(hctx, true);
Thomas Gleixner9467f852016-09-22 08:05:17 -06002052 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06002053}
2054
Thomas Gleixner9467f852016-09-22 08:05:17 -06002055static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
Jens Axboe484b4062014-05-21 14:01:15 -06002056{
Thomas Gleixner9467f852016-09-22 08:05:17 -06002057 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2058 &hctx->cpuhp_dead);
Jens Axboe484b4062014-05-21 14:01:15 -06002059}
2060
Ming Leic3b4afc2015-06-04 22:25:04 +08002061/* hctx->ctxs will be freed in queue's release handler */
Ming Lei08e98fc2014-09-25 23:23:38 +08002062static void blk_mq_exit_hctx(struct request_queue *q,
2063 struct blk_mq_tag_set *set,
2064 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2065{
Omar Sandoval9c1051a2017-05-04 08:17:21 -06002066 blk_mq_debugfs_unregister_hctx(hctx);
2067
Ming Lei8ab0b7d2018-01-09 21:28:29 +08002068 if (blk_mq_hw_queue_mapped(hctx))
2069 blk_mq_tag_idle(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002070
Ming Leif70ced02014-09-25 23:23:47 +08002071 if (set->ops->exit_request)
Christoph Hellwigd6296d392017-05-01 10:19:08 -06002072 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
Ming Leif70ced02014-09-25 23:23:47 +08002073
Omar Sandoval93252632017-04-05 12:01:31 -07002074 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2075
Ming Lei08e98fc2014-09-25 23:23:38 +08002076 if (set->ops->exit_hctx)
2077 set->ops->exit_hctx(hctx, hctx_idx);
2078
Bart Van Assche6a83e742016-11-02 10:09:51 -06002079 if (hctx->flags & BLK_MQ_F_BLOCKING)
Tejun Heo05707b62018-01-09 08:29:53 -08002080 cleanup_srcu_struct(hctx->srcu);
Bart Van Assche6a83e742016-11-02 10:09:51 -06002081
Thomas Gleixner9467f852016-09-22 08:05:17 -06002082 blk_mq_remove_cpuhp(hctx);
Ming Leif70ced02014-09-25 23:23:47 +08002083 blk_free_flush_queue(hctx->fq);
Omar Sandoval88459642016-09-17 08:38:44 -06002084 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08002085}
2086
Ming Lei624dbe42014-05-27 23:35:13 +08002087static void blk_mq_exit_hw_queues(struct request_queue *q,
2088 struct blk_mq_tag_set *set, int nr_queue)
2089{
2090 struct blk_mq_hw_ctx *hctx;
2091 unsigned int i;
2092
2093 queue_for_each_hw_ctx(q, hctx, i) {
2094 if (i == nr_queue)
2095 break;
Ming Lei08e98fc2014-09-25 23:23:38 +08002096 blk_mq_exit_hctx(q, set, hctx, i);
Ming Lei624dbe42014-05-27 23:35:13 +08002097 }
Ming Lei624dbe42014-05-27 23:35:13 +08002098}
2099
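/*
 * Set up one hardware context: run work, dispatch list, ctx map and
 * dispatch_wait entry, then let the driver and the I/O scheduler
 * initialize their per-hctx state, and finally allocate the per-hctx
 * flush queue.
 */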
Ming Lei08e98fc2014-09-25 23:23:38 +08002100static int blk_mq_init_hctx(struct request_queue *q,
2101 struct blk_mq_tag_set *set,
2102 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2103{
2104 int node;
2105
2106 node = hctx->numa_node;
2107 if (node == NUMA_NO_NODE)
2108 node = hctx->numa_node = set->numa_node;
2109
Jens Axboe9f993732017-04-10 09:54:54 -06002110 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
Ming Lei08e98fc2014-09-25 23:23:38 +08002111 spin_lock_init(&hctx->lock);
2112 INIT_LIST_HEAD(&hctx->dispatch);
2113 hctx->queue = q;
Jeff Moyer2404e602015-11-03 10:40:06 -05002114 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
Ming Lei08e98fc2014-09-25 23:23:38 +08002115
Thomas Gleixner9467f852016-09-22 08:05:17 -06002116 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
Ming Lei08e98fc2014-09-25 23:23:38 +08002117
2118 hctx->tags = set->tags[hctx_idx];
2119
2120 /*
2121	 * Allocate space for all possible CPUs to avoid allocation at
2122	 * runtime.
2123 */
Johannes Thumshirnd904bfa2017-11-15 17:32:33 -08002124 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
Ming Lei08e98fc2014-09-25 23:23:38 +08002125 GFP_KERNEL, node);
2126 if (!hctx->ctxs)
2127 goto unregister_cpu_notifier;
2128
Omar Sandoval88459642016-09-17 08:38:44 -06002129 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2130 node))
Ming Lei08e98fc2014-09-25 23:23:38 +08002131 goto free_ctxs;
2132
2133 hctx->nr_ctx = 0;
2134
Jens Axboeeb619fd2017-11-09 08:32:43 -07002135 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2136 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2137
Ming Lei08e98fc2014-09-25 23:23:38 +08002138 if (set->ops->init_hctx &&
2139 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2140 goto free_bitmap;
2141
Omar Sandoval93252632017-04-05 12:01:31 -07002142 if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2143 goto exit_hctx;
2144
Ming Leif70ced02014-09-25 23:23:47 +08002145 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2146 if (!hctx->fq)
Omar Sandoval93252632017-04-05 12:01:31 -07002147 goto sched_exit_hctx;
Ming Leif70ced02014-09-25 23:23:47 +08002148
Tejun Heo1d9bd512018-01-09 08:29:48 -08002149 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
Ming Leif70ced02014-09-25 23:23:47 +08002150 goto free_fq;
2151
Bart Van Assche6a83e742016-11-02 10:09:51 -06002152 if (hctx->flags & BLK_MQ_F_BLOCKING)
Tejun Heo05707b62018-01-09 08:29:53 -08002153 init_srcu_struct(hctx->srcu);
Bart Van Assche6a83e742016-11-02 10:09:51 -06002154
Omar Sandoval9c1051a2017-05-04 08:17:21 -06002155 blk_mq_debugfs_register_hctx(q, hctx);
2156
Ming Lei08e98fc2014-09-25 23:23:38 +08002157 return 0;
2158
Ming Leif70ced02014-09-25 23:23:47 +08002159 free_fq:
2160 kfree(hctx->fq);
Omar Sandoval93252632017-04-05 12:01:31 -07002161 sched_exit_hctx:
2162 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
Ming Leif70ced02014-09-25 23:23:47 +08002163 exit_hctx:
2164 if (set->ops->exit_hctx)
2165 set->ops->exit_hctx(hctx, hctx_idx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002166 free_bitmap:
Omar Sandoval88459642016-09-17 08:38:44 -06002167 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08002168 free_ctxs:
2169 kfree(hctx->ctxs);
2170 unregister_cpu_notifier:
Thomas Gleixner9467f852016-09-22 08:05:17 -06002171 blk_mq_remove_cpuhp(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002172 return -1;
2173}
2174
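/*
 * Initialize the per-CPU software queues. Every possible CPU gets a ctx,
 * including offline ones, so nothing needs allocating on CPU hotplug.
 */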
Jens Axboe320ae512013-10-24 09:20:05 +01002175static void blk_mq_init_cpu_queues(struct request_queue *q,
2176 unsigned int nr_hw_queues)
2177{
2178 unsigned int i;
2179
2180 for_each_possible_cpu(i) {
2181 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2182 struct blk_mq_hw_ctx *hctx;
2183
Jens Axboe320ae512013-10-24 09:20:05 +01002184 __ctx->cpu = i;
2185 spin_lock_init(&__ctx->lock);
2186 INIT_LIST_HEAD(&__ctx->rq_list);
2187 __ctx->queue = q;
2188
Jens Axboe320ae512013-10-24 09:20:05 +01002189 /*
2190		 * Set the local node, IFF we have more than one hw queue. If
2191		 * not, we remain on the home node of the device.
2192 */
Christoph Hellwig20e4d8132018-01-12 10:53:06 +08002193 hctx = blk_mq_map_queue(q, i);
Jens Axboe320ae512013-10-24 09:20:05 +01002194 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
Raghavendra K Tbffed452015-12-02 16:59:05 +05302195 hctx->numa_node = local_memory_node(cpu_to_node(i));
Jens Axboe320ae512013-10-24 09:20:05 +01002196 }
2197}
2198
Jens Axboecc71a6f2017-01-11 14:29:56 -07002199static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2200{
2201 int ret = 0;
2202
2203 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2204 set->queue_depth, set->reserved_tags);
2205 if (!set->tags[hctx_idx])
2206 return false;
2207
2208 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2209 set->queue_depth);
2210 if (!ret)
2211 return true;
2212
2213 blk_mq_free_rq_map(set->tags[hctx_idx]);
2214 set->tags[hctx_idx] = NULL;
2215 return false;
2216}
2217
2218static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2219 unsigned int hctx_idx)
2220{
Jens Axboebd166ef2017-01-17 06:03:22 -07002221 if (set->tags[hctx_idx]) {
2222 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2223 blk_mq_free_rq_map(set->tags[hctx_idx]);
2224 set->tags[hctx_idx] = NULL;
2225 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002226}
2227
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002228static void blk_mq_map_swqueue(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01002229{
Ming Lei4412efe2018-04-25 04:01:44 +08002230 unsigned int i, hctx_idx;
Jens Axboe320ae512013-10-24 09:20:05 +01002231 struct blk_mq_hw_ctx *hctx;
2232 struct blk_mq_ctx *ctx;
Ming Lei2a34c082015-04-21 10:00:20 +08002233 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002234
Akinobu Mita60de0742015-09-27 02:09:25 +09002235 /*
2236	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2237 */
2238 mutex_lock(&q->sysfs_lock);
2239
Jens Axboe320ae512013-10-24 09:20:05 +01002240 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboee4043dc2014-04-09 10:18:23 -06002241 cpumask_clear(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002242 hctx->nr_ctx = 0;
huhaid416c922018-05-18 08:32:30 -06002243 hctx->dispatch_from = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002244 }
2245
2246 /*
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002247 * Map software to hardware queues.
Ming Lei4412efe2018-04-25 04:01:44 +08002248 *
2249	 * If the cpu isn't present, the cpu is mapped to the first hctx.
Jens Axboe320ae512013-10-24 09:20:05 +01002250 */
Christoph Hellwig20e4d8132018-01-12 10:53:06 +08002251 for_each_possible_cpu(i) {
Ming Lei4412efe2018-04-25 04:01:44 +08002252 hctx_idx = q->mq_map[i];
2253		/* an unmapped hw queue can be remapped after the CPU topology changes */
2254 if (!set->tags[hctx_idx] &&
2255 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2256 /*
2257			 * If tag initialization fails for some hctx,
2258			 * that hctx won't be brought online. In this
2259			 * case, remap the current ctx to hctx[0], which
2260			 * is guaranteed to always have tags allocated.
2261 */
2262 q->mq_map[i] = 0;
2263 }
2264
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002265 ctx = per_cpu_ptr(q->queue_ctx, i);
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002266 hctx = blk_mq_map_queue(q, i);
Keith Busch868f2f02015-12-17 17:08:14 -07002267
Jens Axboee4043dc2014-04-09 10:18:23 -06002268 cpumask_set_cpu(i, hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002269 ctx->index_hw = hctx->nr_ctx;
2270 hctx->ctxs[hctx->nr_ctx++] = ctx;
2271 }
Jens Axboe506e9312014-05-07 10:26:44 -06002272
Akinobu Mita60de0742015-09-27 02:09:25 +09002273 mutex_unlock(&q->sysfs_lock);
2274
Jens Axboe506e9312014-05-07 10:26:44 -06002275 queue_for_each_hw_ctx(q, hctx, i) {
Ming Lei4412efe2018-04-25 04:01:44 +08002276 /*
2277 * If no software queues are mapped to this hardware queue,
2278 * disable it and free the request entries.
2279 */
2280 if (!hctx->nr_ctx) {
2281			/* Never unmap queue 0.  We need it as a
2282			 * fallback in case a new remap fails
2283			 * allocation.
2284 */
2285 if (i && set->tags[i])
2286 blk_mq_free_map_and_requests(set, i);
2287
2288 hctx->tags = NULL;
2289 continue;
2290 }
Jens Axboe484b4062014-05-21 14:01:15 -06002291
Ming Lei2a34c082015-04-21 10:00:20 +08002292 hctx->tags = set->tags[i];
2293 WARN_ON(!hctx->tags);
2294
Jens Axboe484b4062014-05-21 14:01:15 -06002295 /*
Chong Yuan889fa312015-04-15 11:39:29 -06002296 * Set the map size to the number of mapped software queues.
2297 * This is more accurate and more efficient than looping
2298 * over all possibly mapped software queues.
2299 */
Omar Sandoval88459642016-09-17 08:38:44 -06002300 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
Chong Yuan889fa312015-04-15 11:39:29 -06002301
2302 /*
Jens Axboe484b4062014-05-21 14:01:15 -06002303		 * Initialize the batch round-robin counts.
2304 */
Ming Leif82ddf12018-04-08 17:48:10 +08002305 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
Jens Axboe506e9312014-05-07 10:26:44 -06002306 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2307 }
Jens Axboe320ae512013-10-24 09:20:05 +01002308}
2309
Jens Axboe8e8320c2017-06-20 17:56:13 -06002310/*
2311 * Caller needs to ensure that we're either frozen/quiesced, or that
2312 * the queue isn't live yet.
2313 */
Jeff Moyer2404e602015-11-03 10:40:06 -05002314static void queue_set_hctx_shared(struct request_queue *q, bool shared)
Jens Axboe0d2602c2014-05-13 15:10:52 -06002315{
2316 struct blk_mq_hw_ctx *hctx;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002317 int i;
2318
Jeff Moyer2404e602015-11-03 10:40:06 -05002319 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe8e8320c2017-06-20 17:56:13 -06002320 if (shared) {
2321 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2322 atomic_inc(&q->shared_hctx_restart);
Jeff Moyer2404e602015-11-03 10:40:06 -05002323 hctx->flags |= BLK_MQ_F_TAG_SHARED;
Jens Axboe8e8320c2017-06-20 17:56:13 -06002324 } else {
2325 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2326 atomic_dec(&q->shared_hctx_restart);
Jeff Moyer2404e602015-11-03 10:40:06 -05002327 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
Jens Axboe8e8320c2017-06-20 17:56:13 -06002328 }
Jeff Moyer2404e602015-11-03 10:40:06 -05002329 }
2330}
2331
Jens Axboe8e8320c2017-06-20 17:56:13 -06002332static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2333 bool shared)
Jeff Moyer2404e602015-11-03 10:40:06 -05002334{
2335 struct request_queue *q;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002336
Bart Van Assche705cda92017-04-07 11:16:49 -07002337 lockdep_assert_held(&set->tag_list_lock);
2338
Jens Axboe0d2602c2014-05-13 15:10:52 -06002339 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2340 blk_mq_freeze_queue(q);
Jeff Moyer2404e602015-11-03 10:40:06 -05002341 queue_set_hctx_shared(q, shared);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002342 blk_mq_unfreeze_queue(q);
2343 }
2344}
2345
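/*
 * Detach a queue from its tag set. If this leaves a single user, the tags
 * transition back to unshared, and every remaining queue is updated.
 */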
2346static void blk_mq_del_queue_tag_set(struct request_queue *q)
2347{
2348 struct blk_mq_tag_set *set = q->tag_set;
2349
Jens Axboe0d2602c2014-05-13 15:10:52 -06002350 mutex_lock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002351 list_del_rcu(&q->tag_set_list);
2352 INIT_LIST_HEAD(&q->tag_set_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002353 if (list_is_singular(&set->tag_list)) {
2354 /* just transitioned to unshared */
2355 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2356 /* update existing queue */
2357 blk_mq_update_tag_set_depth(set, false);
2358 }
Jens Axboe0d2602c2014-05-13 15:10:52 -06002359 mutex_unlock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002360
2361 synchronize_rcu();
Jens Axboe0d2602c2014-05-13 15:10:52 -06002362}
2363
2364static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2365 struct request_queue *q)
2366{
2367 q->tag_set = set;
2368
2369 mutex_lock(&set->tag_list_lock);
Jeff Moyer2404e602015-11-03 10:40:06 -05002370
Jens Axboeff821d22017-11-10 22:05:12 -07002371 /*
2372 * Check to see if we're transitioning to shared (from 1 to 2 queues).
2373 */
2374 if (!list_empty(&set->tag_list) &&
2375 !(set->flags & BLK_MQ_F_TAG_SHARED)) {
Jeff Moyer2404e602015-11-03 10:40:06 -05002376 set->flags |= BLK_MQ_F_TAG_SHARED;
2377 /* update existing queue */
2378 blk_mq_update_tag_set_depth(set, true);
2379 }
2380 if (set->flags & BLK_MQ_F_TAG_SHARED)
2381 queue_set_hctx_shared(q, true);
Bart Van Assche705cda92017-04-07 11:16:49 -07002382 list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002383
Jens Axboe0d2602c2014-05-13 15:10:52 -06002384 mutex_unlock(&set->tag_list_lock);
2385}
2386
Ming Leie09aae72015-01-29 20:17:27 +08002387/*
2388 * This is the actual release handler for mq, but we do it from the
2389 * request queue's release handler to avoid use-after-free headaches:
2390 * q->mq_kobj shouldn't have been introduced, but we can't group the
2391 * ctx/kctx kobjects without it.
2392 */
2393void blk_mq_release(struct request_queue *q)
2394{
2395 struct blk_mq_hw_ctx *hctx;
2396 unsigned int i;
2397
2398 /* hctx kobj stays in hctx */
Ming Leic3b4afc2015-06-04 22:25:04 +08002399 queue_for_each_hw_ctx(q, hctx, i) {
2400 if (!hctx)
2401 continue;
Ming Lei6c8b2322017-02-22 18:14:01 +08002402 kobject_put(&hctx->kobj);
Ming Leic3b4afc2015-06-04 22:25:04 +08002403 }
Ming Leie09aae72015-01-29 20:17:27 +08002404
Akinobu Mitaa723bab2015-09-27 02:09:21 +09002405 q->mq_map = NULL;
2406
Ming Leie09aae72015-01-29 20:17:27 +08002407 kfree(q->queue_hw_ctx);
2408
Ming Lei7ea5fe32017-02-22 18:14:00 +08002409 /*
2410	 * Release .mq_kobj and the sw queues' kobjects now because
2411	 * both share their lifetime with the request queue.
2412 */
2413 blk_mq_sysfs_deinit(q);
2414
Ming Leie09aae72015-01-29 20:17:27 +08002415 free_percpu(q->queue_ctx);
2416}
2417
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002418struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01002419{
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002420 struct request_queue *uninit_q, *q;
2421
Bart Van Assche5ee05242018-02-28 10:15:31 -08002422 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002423 if (!uninit_q)
2424 return ERR_PTR(-ENOMEM);
2425
2426 q = blk_mq_init_allocated_queue(set, uninit_q);
2427 if (IS_ERR(q))
2428 blk_cleanup_queue(uninit_q);
2429
2430 return q;
2431}
2432EXPORT_SYMBOL(blk_mq_init_queue);
2433
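/*
 * The srcu_struct sits at the very end of blk_mq_hw_ctx and is only
 * allocated for BLK_MQ_F_BLOCKING queues, so the allocation size depends
 * on the tag set's flags.
 */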
Bart Van Assche07319672017-06-20 11:15:38 -07002434static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2435{
2436 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2437
Tejun Heo05707b62018-01-09 08:29:53 -08002438 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
Bart Van Assche07319672017-06-20 11:15:38 -07002439 __alignof__(struct blk_mq_hw_ctx)) !=
2440 sizeof(struct blk_mq_hw_ctx));
2441
2442 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2443 hw_ctx_size += sizeof(struct srcu_struct);
2444
2445 return hw_ctx_size;
2446}
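
/*
 * Note on the BUILD_BUG_ON above: 'srcu' is a zero-length trailing
 * member of struct blk_mq_hw_ctx, only allocated (and later initialized)
 * for BLK_MQ_F_BLOCKING tag sets, so the structure must end exactly at
 * the srcu offset or the size arithmetic here would be wrong.  The
 * allocation in blk_mq_realloc_hw_ctxs() then conceptually amounts to:
 *
 *	size = sizeof(struct blk_mq_hw_ctx);
 *	if (set->flags & BLK_MQ_F_BLOCKING)
 *		size += sizeof(struct srcu_struct);
 *	hctx = kzalloc_node(size, GFP_KERNEL, node);
 */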

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);

	/* protect against switching io scheduler */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
					     node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags)
				blk_mq_free_map_and_requests(set, j);
			blk_mq_exit_hctx(q, set, hctx, j);
			kobject_put(&hctx->kobj);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	mutex_unlock(&q->sysfs_lock);
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
				       GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);
	if (q->mq_ops->poll)
		q->poll_fn = blk_mq_poll;

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = -1;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);

	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
		int ret;

		ret = elevator_init_mq(q);
		if (ret)
			return ERR_PTR(ret);
	}

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	blk_mq_del_queue_tag_set(q);
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_debugfs_unregister_hctxs(q);
	blk_mq_sysfs_unregister(q);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory, worth doing?)
	 */
	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
	blk_mq_debugfs_register_hctxs(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++)
		if (!__blk_mq_alloc_rq_map(set, i))
			goto out_unwind;

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set->tags[i]);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}
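
/*
 * A worked example of the fallback above (numbers illustrative): a driver
 * asks for queue_depth = 1024 but the allocation fails under memory
 * pressure, so the loop retries with 512, then 256, and so on.  If it
 * first succeeds at 256, set->queue_depth ends up as 256 and
 * "blk-mq: reduced tag depth (1024 -> 256)" is logged.  The loop gives up
 * once the depth would fall below set->reserved_tags + BLK_MQ_TAG_MIN.
 */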

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	if (set->ops->map_queues) {
		int cpu;
		/*
		 * A transport's .map_queues callback is usually implemented
		 * in the following way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 * 	mask = get_cpu_mask(queue)
		 * 	for_each_cpu(cpu, mask)
		 * 		set->mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared first
		 * to kill stale mappings, since a given CPU may not end up
		 * mapped to any hw queue.
		 */
		for_each_possible_cpu(cpu)
			set->mq_map[cpu] = 0;

		return set->ops->map_queues(set);
	} else
		return blk_mq_map_queues(set);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the adjusted
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

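	/*
	 * .get_budget and .put_budget must come as a pair: the XOR below
	 * rejects a set that supplies only one of them, since a driver
	 * that reserves a dispatch budget must also be able to return it.
	 */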
	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
				   GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		blk_mq_free_map_and_requests(set, i);

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	blk_mq_freeze_queue(q);
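	/*
	 * Quiesce in addition to freezing: freezing waits for requests
	 * already in flight, but a queued hctx run could still be
	 * dispatching while the tag depths are resized below.
	 */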
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (!hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		}
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
					 int nr_hw_queues)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_queue_reinit(q);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
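
/*
 * Usage note (describing callers, not this file): drivers whose queue
 * count can change at runtime, e.g. NVMe after a controller reset that
 * negotiates a different number of I/O queues, call
 * blk_mq_update_nr_hw_queues() so that every queue sharing the tag set
 * is frozen, remapped and re-registered consistently.
 */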

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We use the stats for the relevant IO size, if
	 * available, which leads to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
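
/*
 * Worked example of the estimate above (numbers illustrative): if the
 * stats bucket for this request's direction and size reports a mean
 * completion time of 85000 ns, the hybrid poller sleeps for
 * (85000 + 1) / 2 = 42500 ns before switching to busy-polling, trading
 * a little completion latency for far less CPU time spent spinning.
 */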

static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
{
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
		return false;

	/*
	 * poll_nsec can be:
	 *
	 * -1:	don't ever hybrid sleep
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec == -1)
		return false;
	else if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, hctx, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	hrtimer_init_sleeper(&hs, current);
	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}
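
/*
 * Tuning note (based on the block sysfs code of the same era): the
 * q->poll_nsec knob checked above is exposed as
 * /sys/block/<dev>/queue/io_poll_delay, which takes microseconds:
 * writing -1 selects classic busy-polling only, 0 the adaptive
 * half-of-mean sleep computed by blk_mq_poll_nsecs(), and a positive
 * value a fixed pre-poll sleep of that many microseconds.
 */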

static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct request_queue *q = hctx->queue;
	long state;

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
		return true;

	hctx->poll_considered++;

	state = current->state;
	while (!need_resched()) {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx, rq->tag);
		if (ret > 0) {
			hctx->poll_success++;
			set_current_state(TASK_RUNNING);
			return true;
		}

		if (signal_pending_state(state, current))
			set_current_state(TASK_RUNNING);

		if (current->state == TASK_RUNNING)
			return true;
		if (ret < 0)
			break;
		cpu_relax();
	}

	__set_current_state(TASK_RUNNING);
	return false;
}

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return false;

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return __blk_mq_poll(hctx, rq);
}
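
/*
 * For reference, the cookie decoded above packs the hw queue index and a
 * tag into one blk_qc_t; roughly, per the blk_types.h helpers of this
 * era:
 *
 *	queue_num = (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
 *	tag       = cookie & ((1U << BLK_QC_T_SHIFT) - 1);
 *	internal  = cookie & BLK_QC_T_INTERNAL;
 *
 * where "internal" set means the tag lives in hctx->sched_tags (an I/O
 * scheduler tag) rather than in the driver tags hctx->tags.
 */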

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}
subsys_initcall(blk_mq_init);