// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

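/*
 * Map a request to one of the poll statistics buckets: the bucket index
 * encodes the data direction and ilog2() of the number of sectors moved.
 */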
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

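/*
 * Tag iterator callback: count in-flight requests that belong to mi->part
 * (or to any partition when the bdev is the whole disk), split by direction.
 */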
static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if ((!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

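/*
 * Start freezing the queue: bump the freeze depth and, on the first freeze,
 * kill q_usage_counter and run the hardware queues so pending requests can
 * drain.
 */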
void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

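/*
 * Drop one level of freeze depth; when it reaches zero, resurrect
 * q_usage_counter and wake up everyone waiting for the queue to unfreeze.
 */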
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function has returned, no
 * dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

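/*
 * Wake up all tag waiters on every mapped hardware queue, typically used
 * when the queue is going away so that sleepers re-check and bail out.
 */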
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
}

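/*
 * Initialize a freshly allocated request: wire up the tag (driver tag or
 * scheduler tag depending on whether an elevator is attached), reset all
 * per-request state, and let the I/O scheduler prepare it if needed.
 */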
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];

	if (data->q->elevator) {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	} else {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->mq_hctx = data->hctx;
	rq->rq_flags = 0;
	rq->cmd_flags = data->cmd_flags;
	if (data->flags & BLK_MQ_REQ_PM)
		rq->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	blk_crypto_rq_set_defaults(rq);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);

	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;

	data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
	refcount_set(&rq->ref, 1);

	if (!op_is_flush(data->cmd_flags)) {
		struct elevator_queue *e = data->q->elevator;

		rq->elv.icq = NULL;
		if (e && e->type->ops.prepare_request) {
			if (e->type->icq_cache)
				blk_mq_sched_assign_ioc(rq);

			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	data->hctx->queued++;
	return rq;
}

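/*
 * Core allocation path: pick a software/hardware context, optionally let the
 * elevator limit the depth, and get a tag.  A failed waiting allocation means
 * the hctx went inactive, so retry once CPU hotplug has migrated us.
 */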
static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	struct elevator_queue *e = q->elevator;
	u64 alloc_time_ns = 0;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		/*
		 * Flush/passthrough requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!e)
		blk_mq_tag_busy(data->hctx);

	/*
	 * Waiting allocations only fail because of an inactive hctx. In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;

		/*
		 * Give up the CPU and sleep for a random short time to ensure
		 * that threads using a realtime scheduling class are migrated
		 * off the CPU, and thus off the hctx that is going away.
		 */
		msleep(3);
		goto retry;
	}
	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_request(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

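/*
 * Final teardown of a request once its reference count hits zero: release
 * crypto state, give back driver and scheduler tags, restart the hctx and
 * drop the queue usage reference.
 */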
static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.finish_request)
			e->type->ops.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	u64 now = 0;

	if (blk_mq_need_time_stamp(rq))
		now = ktime_get_ns();

	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);

	blk_account_io_done(rq, now);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

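/*
 * Drain a per-CPU llist of completed requests and invoke each driver's
 * ->complete() callback, in the order the requests were added.
 */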
static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain? Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

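/*
 * Queue the request on the submission CPU's completion llist and kick it
 * with an IPI; only the request that turns the list non-empty sends the IPI.
 */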
static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_HIPRI)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	BUG_ON(!list_empty(&rq->queuelist));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP is set, the request contains driver specific
		 * data, so insert it into the hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
			       void *priv, bool reserved)
{
	/*
	 * If we find a request that isn't idle and the queue matches,
	 * we know the queue is busy. Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

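/*
 * Check whether an in-flight request has passed its deadline; if not, track
 * the earliest pending deadline in *next so the timeout timer can be re-armed.
 */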
static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, and the
	 * expire check is reliable. If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

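/*
 * Try to grab a driver tag from the hctx's tag bitmap (or the reserved
 * bitmap for reserved scheduler tags), honouring the shared-tag depth limit.
 */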
Ming Lei570e9b72020-06-30 22:03:55 +08001072static bool __blk_mq_get_driver_tag(struct request *rq)
1073{
John Garryae0f1a72021-10-05 18:23:38 +08001074 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
Ming Lei570e9b72020-06-30 22:03:55 +08001075 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
Ming Lei570e9b72020-06-30 22:03:55 +08001076 int tag;
1077
Ming Lei568f2702020-07-06 22:41:11 +08001078 blk_mq_tag_busy(rq->mq_hctx);
1079
Ming Lei570e9b72020-06-30 22:03:55 +08001080 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
John Garryae0f1a72021-10-05 18:23:38 +08001081 bt = &rq->mq_hctx->tags->breserved_tags;
Ming Lei570e9b72020-06-30 22:03:55 +08001082 tag_offset = 0;
Ming Lei28500852020-09-11 18:41:14 +08001083 } else {
1084 if (!hctx_may_queue(rq->mq_hctx, bt))
1085 return false;
Ming Lei570e9b72020-06-30 22:03:55 +08001086 }
1087
Ming Lei570e9b72020-06-30 22:03:55 +08001088 tag = __sbitmap_queue_get(bt);
1089 if (tag == BLK_MQ_NO_TAG)
1090 return false;
1091
1092 rq->tag = tag + tag_offset;
Ming Lei570e9b72020-06-30 22:03:55 +08001093 return true;
1094}
1095
Jan Kara613471542021-06-03 12:47:21 +02001096bool blk_mq_get_driver_tag(struct request *rq)
Ming Lei570e9b72020-06-30 22:03:55 +08001097{
Ming Lei568f2702020-07-06 22:41:11 +08001098 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1099
1100 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
1101 return false;
1102
Ming Lei51db1c32020-08-19 23:20:19 +08001103 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
Ming Lei568f2702020-07-06 22:41:11 +08001104 !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1105 rq->rq_flags |= RQF_MQ_INFLIGHT;
John Garrybccf5e22020-08-19 23:20:26 +08001106 __blk_mq_inc_active_requests(hctx);
Ming Lei568f2702020-07-06 22:41:11 +08001107 }
1108 hctx->tags->rqs[rq->tag] = rq;
1109 return true;
Ming Lei570e9b72020-06-30 22:03:55 +08001110}
1111
Jens Axboeeb619fd2017-11-09 08:32:43 -07001112static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1113 int flags, void *key)
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001114{
1115 struct blk_mq_hw_ctx *hctx;
1116
1117 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1118
Ming Lei5815839b2018-06-25 19:31:47 +08001119 spin_lock(&hctx->dispatch_wait_lock);
Jens Axboee8618572019-03-25 12:34:10 -06001120 if (!list_empty(&wait->entry)) {
1121 struct sbitmap_queue *sbq;
1122
1123 list_del_init(&wait->entry);
John Garryae0f1a72021-10-05 18:23:38 +08001124 sbq = &hctx->tags->bitmap_tags;
Jens Axboee8618572019-03-25 12:34:10 -06001125 atomic_dec(&sbq->ws_active);
1126 }
Ming Lei5815839b2018-06-25 19:31:47 +08001127 spin_unlock(&hctx->dispatch_wait_lock);
1128
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001129 blk_mq_run_hw_queue(hctx, true);
1130 return 1;
1131}
1132
Jens Axboef906a6a2017-11-09 16:10:13 -07001133/*
1134 * Mark us waiting for a tag. For shared tags, this involves hooking us into
Bart Van Asscheee3e4de2018-01-09 10:09:15 -08001135 * the tag wakeups. For non-shared tags, we can simply mark ourselves as needing a
 1136 * restart. In both cases, take care to check the condition again after
Jens Axboef906a6a2017-11-09 16:10:13 -07001137 * marking us as waiting.
1138 */
Ming Lei2278d692018-06-25 19:31:46 +08001139static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
Jens Axboef906a6a2017-11-09 16:10:13 -07001140 struct request *rq)
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001141{
John Garryae0f1a72021-10-05 18:23:38 +08001142 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
Ming Lei5815839b2018-06-25 19:31:47 +08001143 struct wait_queue_head *wq;
Jens Axboef906a6a2017-11-09 16:10:13 -07001144 wait_queue_entry_t *wait;
1145 bool ret;
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001146
Ming Lei51db1c32020-08-19 23:20:19 +08001147 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
Yufen Yu684b7322019-03-15 11:05:10 +08001148 blk_mq_sched_mark_restart_hctx(hctx);
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001149
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001150 /*
1151 * It's possible that a tag was freed in the window between the
1152 * allocation failure and adding the hardware queue to the wait
1153 * queue.
1154 *
1155 * Don't clear RESTART here, someone else could have set it.
1156 * At most this will cost an extra queue run.
1157 */
Ming Lei8ab6bb9e2018-06-25 19:31:45 +08001158 return blk_mq_get_driver_tag(rq);
Jens Axboeeb619fd2017-11-09 08:32:43 -07001159 }
1160
Ming Lei2278d692018-06-25 19:31:46 +08001161 wait = &hctx->dispatch_wait;
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001162 if (!list_empty_careful(&wait->entry))
1163 return false;
1164
Jens Axboee8618572019-03-25 12:34:10 -06001165 wq = &bt_wait_ptr(sbq, hctx)->wait;
Ming Lei5815839b2018-06-25 19:31:47 +08001166
1167 spin_lock_irq(&wq->lock);
1168 spin_lock(&hctx->dispatch_wait_lock);
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001169 if (!list_empty(&wait->entry)) {
Ming Lei5815839b2018-06-25 19:31:47 +08001170 spin_unlock(&hctx->dispatch_wait_lock);
1171 spin_unlock_irq(&wq->lock);
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001172 return false;
1173 }
1174
Jens Axboee8618572019-03-25 12:34:10 -06001175 atomic_inc(&sbq->ws_active);
Ming Lei5815839b2018-06-25 19:31:47 +08001176 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1177 __add_wait_queue(wq, wait);
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001178
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001179 /*
Jens Axboeeb619fd2017-11-09 08:32:43 -07001180 * It's possible that a tag was freed in the window between the
1181 * allocation failure and adding the hardware queue to the wait
1182 * queue.
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001183 */
Ming Lei8ab6bb9e2018-06-25 19:31:45 +08001184 ret = blk_mq_get_driver_tag(rq);
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001185 if (!ret) {
Ming Lei5815839b2018-06-25 19:31:47 +08001186 spin_unlock(&hctx->dispatch_wait_lock);
1187 spin_unlock_irq(&wq->lock);
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001188 return false;
Jens Axboef906a6a2017-11-09 16:10:13 -07001189 }
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001190
1191 /*
1192 * We got a tag, remove ourselves from the wait queue to ensure
1193 * someone else gets the wakeup.
1194 */
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001195 list_del_init(&wait->entry);
Jens Axboee8618572019-03-25 12:34:10 -06001196 atomic_dec(&sbq->ws_active);
Ming Lei5815839b2018-06-25 19:31:47 +08001197 spin_unlock(&hctx->dispatch_wait_lock);
1198 spin_unlock_irq(&wq->lock);
Bart Van Asschec27d53f2018-01-10 13:41:21 -08001199
1200 return true;
Omar Sandovalda55f2c2017-02-22 10:58:29 -08001201}
1202
Ming Lei6e7687172018-07-03 09:03:16 -06001203#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
1204#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
 1205/*
 1206 * Update hctx->dispatch_busy with an Exponential Weighted Moving Average (EWMA):
 1207 * - EWMA is a simple way to maintain a running average
 1208 * - weights of 7/8 (history) and 1/8 (new sample) make old samples decay exponentially
 1209 * - the busy sample is scaled by a factor of 4 (1 << FACTOR) so that integer division
 1210 *   does not truncate the result straight to 0; the exact factor matters little, as the average decays exponentially either way
 1211 */
1212static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1213{
1214 unsigned int ewma;
1215
Ming Lei6e7687172018-07-03 09:03:16 -06001216 ewma = hctx->dispatch_busy;
1217
1218 if (!ewma && !busy)
1219 return;
1220
1221 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1222 if (busy)
1223 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1224 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1225
1226 hctx->dispatch_busy = ewma;
1227}
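
/*
 * A worked example with the constants above (WEIGHT = 8, FACTOR = 4),
 * starting from an idle hctx (dispatch_busy == 0):
 *	busy, busy, busy: 0 -> (0*7+16)/8 = 2 -> (2*7+16)/8 = 3 -> (3*7+16)/8 = 4
 *	then idle, idle:  4 -> (4*7)/8 = 3 -> (3*7)/8 = 2
 * i.e. dispatch_busy ramps up while the driver keeps reporting busy and
 * decays back towards zero once dispatching succeeds again.
 */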
1228
Ming Lei86ff7c22018-01-30 22:04:57 -05001229#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
1230
Johannes Thumshirnc92a4102020-03-25 00:24:44 +09001231static void blk_mq_handle_dev_resource(struct request *rq,
1232 struct list_head *list)
1233{
1234 struct request *next =
1235 list_first_entry_or_null(list, struct request, queuelist);
1236
1237 /*
1238 * If an I/O scheduler has been configured and we got a driver tag for
1239 * the next request already, free it.
1240 */
1241 if (next)
1242 blk_mq_put_driver_tag(next);
1243
1244 list_add(&rq->queuelist, list);
1245 __blk_mq_requeue_request(rq);
1246}
1247
Keith Busch0512a752020-05-12 17:55:47 +09001248static void blk_mq_handle_zone_resource(struct request *rq,
1249 struct list_head *zone_list)
1250{
1251 /*
1252 * If we end up here it is because we cannot dispatch a request to a
1253 * specific zone due to LLD level zone-write locking or other zone
1254 * related resource not being available. In this case, set the request
1255 * aside in zone_list for retrying it later.
1256 */
1257 list_add(&rq->queuelist, zone_list);
1258 __blk_mq_requeue_request(rq);
1259}
1260
Ming Lei75383522020-06-30 18:24:58 +08001261enum prep_dispatch {
1262 PREP_DISPATCH_OK,
1263 PREP_DISPATCH_NO_TAG,
1264 PREP_DISPATCH_NO_BUDGET,
1265};
1266
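/*
 * The dispatch budget used below is an optional per-queue resource count
 * exposed through the ->get_budget()/->put_budget() callbacks (scsi-mq is
 * the main in-tree user, where it roughly models the device queue depth).
 * For drivers that don't implement it, blk_mq_get_dispatch_budget() simply
 * succeeds with a zero token.
 */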
1267static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1268 bool need_budget)
1269{
1270 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
Ming Lei2a5a24a2021-01-22 10:33:12 +08001271 int budget_token = -1;
Ming Lei75383522020-06-30 18:24:58 +08001272
Ming Lei2a5a24a2021-01-22 10:33:12 +08001273 if (need_budget) {
1274 budget_token = blk_mq_get_dispatch_budget(rq->q);
1275 if (budget_token < 0) {
1276 blk_mq_put_driver_tag(rq);
1277 return PREP_DISPATCH_NO_BUDGET;
1278 }
1279 blk_mq_set_rq_budget_token(rq, budget_token);
Ming Lei75383522020-06-30 18:24:58 +08001280 }
1281
1282 if (!blk_mq_get_driver_tag(rq)) {
1283 /*
1284 * The initial allocation attempt failed, so we need to
1285 * rerun the hardware queue when a tag is freed. The
1286 * waitqueue takes care of that. If the queue is run
1287 * before we add this entry back on the dispatch list,
1288 * we'll re-run it below.
1289 */
1290 if (!blk_mq_mark_tag_wait(hctx, rq)) {
Ming Lei1fd40b52020-06-30 18:25:00 +08001291 /*
 1292 * Budgets that were not obtained by this function (the bulk-
 1293 * allocated ones) are released together when a partial dispatch is handled
1294 */
1295 if (need_budget)
Ming Lei2a5a24a2021-01-22 10:33:12 +08001296 blk_mq_put_dispatch_budget(rq->q, budget_token);
Ming Lei75383522020-06-30 18:24:58 +08001297 return PREP_DISPATCH_NO_TAG;
1298 }
1299 }
1300
1301 return PREP_DISPATCH_OK;
1302}
1303
Ming Lei1fd40b52020-06-30 18:25:00 +08001304/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
1305static void blk_mq_release_budgets(struct request_queue *q,
Ming Lei2a5a24a2021-01-22 10:33:12 +08001306 struct list_head *list)
Ming Lei1fd40b52020-06-30 18:25:00 +08001307{
Ming Lei2a5a24a2021-01-22 10:33:12 +08001308 struct request *rq;
Ming Lei1fd40b52020-06-30 18:25:00 +08001309
Ming Lei2a5a24a2021-01-22 10:33:12 +08001310 list_for_each_entry(rq, list, queuelist) {
1311 int budget_token = blk_mq_get_rq_budget_token(rq);
1312
1313 if (budget_token >= 0)
1314 blk_mq_put_dispatch_budget(q, budget_token);
1315 }
Ming Lei1fd40b52020-06-30 18:25:00 +08001316}
1317
Jens Axboe1f57f8d2018-06-28 11:54:01 -06001318/*
1319 * Returns true if we did some work AND can potentially do more.
1320 */
Ming Lei445874e2020-06-30 18:24:57 +08001321bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
Ming Lei1fd40b52020-06-30 18:25:00 +08001322 unsigned int nr_budgets)
Jens Axboef04c3df2016-12-07 08:41:17 -07001323{
Ming Lei75383522020-06-30 18:24:58 +08001324 enum prep_dispatch prep;
Ming Lei445874e2020-06-30 18:24:57 +08001325 struct request_queue *q = hctx->queue;
Jianchao Wang6d6f167c2017-11-02 23:24:32 +08001326 struct request *rq, *nxt;
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001327 int errors, queued;
Ming Lei86ff7c22018-01-30 22:04:57 -05001328 blk_status_t ret = BLK_STS_OK;
Keith Busch0512a752020-05-12 17:55:47 +09001329 LIST_HEAD(zone_list);
Jens Axboef04c3df2016-12-07 08:41:17 -07001330
Omar Sandoval81380ca2017-04-07 08:56:26 -06001331 if (list_empty(list))
1332 return false;
1333
Jens Axboef04c3df2016-12-07 08:41:17 -07001334 /*
Jens Axboef04c3df2016-12-07 08:41:17 -07001335 * Now process all the entries, sending them to the driver.
1336 */
Jens Axboe93efe982017-03-24 12:04:19 -06001337 errors = queued = 0;
Omar Sandoval81380ca2017-04-07 08:56:26 -06001338 do {
Jens Axboef04c3df2016-12-07 08:41:17 -07001339 struct blk_mq_queue_data bd;
1340
1341 rq = list_first_entry(list, struct request, queuelist);
Ming Lei0bca7992018-04-05 00:35:21 +08001342
Ming Lei445874e2020-06-30 18:24:57 +08001343 WARN_ON_ONCE(hctx != rq->mq_hctx);
Ming Lei1fd40b52020-06-30 18:25:00 +08001344 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
Ming Lei75383522020-06-30 18:24:58 +08001345 if (prep != PREP_DISPATCH_OK)
Ming Lei0bca7992018-04-05 00:35:21 +08001346 break;
Ming Leide148292017-10-14 17:22:29 +08001347
Jens Axboef04c3df2016-12-07 08:41:17 -07001348 list_del_init(&rq->queuelist);
1349
1350 bd.rq = rq;
Jens Axboe113285b2017-03-02 13:26:04 -07001351
1352 /*
1353 * Flag last if we have no more requests, or if we have more
1354 * but can't assign a driver tag to it.
1355 */
1356 if (list_empty(list))
1357 bd.last = true;
1358 else {
Jens Axboe113285b2017-03-02 13:26:04 -07001359 nxt = list_first_entry(list, struct request, queuelist);
Ming Lei8ab6bb9e2018-06-25 19:31:45 +08001360 bd.last = !blk_mq_get_driver_tag(nxt);
Jens Axboe113285b2017-03-02 13:26:04 -07001361 }
Jens Axboef04c3df2016-12-07 08:41:17 -07001362
Ming Lei1fd40b52020-06-30 18:25:00 +08001363 /*
 1364 * Once the request has been queued to the LLD, its budget no
 1365 * longer needs to be covered here
1366 */
1367 if (nr_budgets)
1368 nr_budgets--;
Jens Axboef04c3df2016-12-07 08:41:17 -07001369 ret = q->mq_ops->queue_rq(hctx, &bd);
Ming Lei7bf13722020-07-01 21:58:57 +08001370 switch (ret) {
1371 case BLK_STS_OK:
1372 queued++;
Jens Axboef04c3df2016-12-07 08:41:17 -07001373 break;
Ming Lei7bf13722020-07-01 21:58:57 +08001374 case BLK_STS_RESOURCE:
1375 case BLK_STS_DEV_RESOURCE:
1376 blk_mq_handle_dev_resource(rq, list);
1377 goto out;
1378 case BLK_STS_ZONE_RESOURCE:
Keith Busch0512a752020-05-12 17:55:47 +09001379 /*
1380 * Move the request to zone_list and keep going through
1381 * the dispatch list to find more requests the drive can
1382 * accept.
1383 */
1384 blk_mq_handle_zone_resource(rq, &zone_list);
Ming Lei7bf13722020-07-01 21:58:57 +08001385 break;
1386 default:
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001387 errors++;
Hannes Reineckee21ee5a2020-09-30 10:02:53 +02001388 blk_mq_end_request(rq, ret);
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001389 }
Omar Sandoval81380ca2017-04-07 08:56:26 -06001390 } while (!list_empty(list));
Ming Lei7bf13722020-07-01 21:58:57 +08001391out:
Keith Busch0512a752020-05-12 17:55:47 +09001392 if (!list_empty(&zone_list))
1393 list_splice_tail_init(&zone_list, list);
1394
Jens Axboef04c3df2016-12-07 08:41:17 -07001395 hctx->dispatched[queued_to_index(queued)]++;
1396
yangerkun632bfb62020-09-05 19:25:56 +08001397 /* If we didn't flush the entire list, we could have told the driver
1398 * there was more coming, but that turned out to be a lie.
1399 */
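	/*
	 * Drivers that implement ->commit_rqs() (e.g. nvme and virtio_blk)
	 * treat bd->last as a hint and kick the hardware from ->commit_rqs(),
	 * so call it here for whatever did get queued.
	 */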
1400 if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
1401 q->mq_ops->commit_rqs(hctx);
Jens Axboef04c3df2016-12-07 08:41:17 -07001402 /*
1403 * Any items that need requeuing? Stuff them into hctx->dispatch,
1404 * that is where we will continue on next queue run.
1405 */
1406 if (!list_empty(list)) {
Ming Lei86ff7c22018-01-30 22:04:57 -05001407 bool needs_restart;
Ming Lei75383522020-06-30 18:24:58 +08001408 /* For non-shared tags, the RESTART check will suffice */
1409 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
Ming Lei51db1c32020-08-19 23:20:19 +08001410 (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
Ming Lei75383522020-06-30 18:24:58 +08001411 bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
Ming Lei86ff7c22018-01-30 22:04:57 -05001412
Ming Lei2a5a24a2021-01-22 10:33:12 +08001413 if (nr_budgets)
1414 blk_mq_release_budgets(q, list);
Jens Axboef04c3df2016-12-07 08:41:17 -07001415
1416 spin_lock(&hctx->lock);
Ming Lei01e99ae2020-02-25 09:04:32 +08001417 list_splice_tail_init(list, &hctx->dispatch);
Jens Axboef04c3df2016-12-07 08:41:17 -07001418 spin_unlock(&hctx->lock);
1419
1420 /*
Ming Leid7d85352020-08-17 18:01:15 +08001421 * Order adding requests to hctx->dispatch and checking
1422 * SCHED_RESTART flag. The pair of this smp_mb() is the one
 1423 * in blk_mq_sched_restart(). This keeps the restart path from
 1424 * missing the requests newly added to hctx->dispatch while
 1425 * SCHED_RESTART is observed here.
1426 */
1427 smp_mb();
1428
1429 /*
Bart Van Assche710c7852017-04-07 11:16:51 -07001430 * If SCHED_RESTART was set by the caller of this function and
1431 * it is no longer set that means that it was cleared by another
1432 * thread and hence that a queue rerun is needed.
Jens Axboef04c3df2016-12-07 08:41:17 -07001433 *
Jens Axboeeb619fd2017-11-09 08:32:43 -07001434 * If 'no_tag' is set, that means that we failed getting
1435 * a driver tag with an I/O scheduler attached. If our dispatch
1436 * waitqueue is no longer active, ensure that we run the queue
1437 * AFTER adding our entries back to the list.
Jens Axboebd166ef2017-01-17 06:03:22 -07001438 *
Bart Van Assche710c7852017-04-07 11:16:51 -07001439 * If no I/O scheduler has been configured it is possible that
1440 * the hardware queue got stopped and restarted before requests
1441 * were pushed back onto the dispatch list. Rerun the queue to
1442 * avoid starvation. Notes:
1443 * - blk_mq_run_hw_queue() checks whether or not a queue has
1444 * been stopped before rerunning a queue.
1445 * - Some but not all block drivers stop a queue before
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001446 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
Bart Van Assche710c7852017-04-07 11:16:51 -07001447 * and dm-rq.
Ming Lei86ff7c22018-01-30 22:04:57 -05001448 *
1449 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1450 * bit is set, run queue after a delay to avoid IO stalls
Douglas Andersonab3cee32020-04-20 09:24:51 -07001451 * that could otherwise occur if the queue is idle. We'll do
1452 * similar if we couldn't get budget and SCHED_RESTART is set.
Jens Axboebd166ef2017-01-17 06:03:22 -07001453 */
Ming Lei86ff7c22018-01-30 22:04:57 -05001454 needs_restart = blk_mq_sched_needs_restart(hctx);
1455 if (!needs_restart ||
Jens Axboeeb619fd2017-11-09 08:32:43 -07001456 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
Jens Axboebd166ef2017-01-17 06:03:22 -07001457 blk_mq_run_hw_queue(hctx, true);
Douglas Andersonab3cee32020-04-20 09:24:51 -07001458 else if (needs_restart && (ret == BLK_STS_RESOURCE ||
1459 no_budget_avail))
Ming Lei86ff7c22018-01-30 22:04:57 -05001460 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
Jens Axboe1f57f8d2018-06-28 11:54:01 -06001461
Ming Lei6e7687172018-07-03 09:03:16 -06001462 blk_mq_update_dispatch_busy(hctx, true);
Jens Axboe1f57f8d2018-06-28 11:54:01 -06001463 return false;
Ming Lei6e7687172018-07-03 09:03:16 -06001464 } else
1465 blk_mq_update_dispatch_busy(hctx, false);
Jens Axboef04c3df2016-12-07 08:41:17 -07001466
Jens Axboe93efe982017-03-24 12:04:19 -06001467 return (queued + errors) != 0;
Jens Axboef04c3df2016-12-07 08:41:17 -07001468}
1469
André Almeida105663f2020-01-06 15:08:18 -03001470/**
1471 * __blk_mq_run_hw_queue - Run a hardware queue.
1472 * @hctx: Pointer to the hardware queue to run.
1473 *
1474 * Send pending requests to the hardware.
1475 */
Bart Van Assche6a83e742016-11-02 10:09:51 -06001476static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1477{
1478 int srcu_idx;
1479
Jens Axboeb7a71e62017-08-01 09:28:24 -06001480 /*
Jens Axboeb7a71e62017-08-01 09:28:24 -06001481 * We can't run the queue inline with ints disabled. Ensure that
1482 * we catch bad users of this early.
1483 */
1484 WARN_ON_ONCE(in_interrupt());
1485
Jens Axboe04ced152018-01-09 08:29:46 -08001486 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
Jens Axboebf4907c2017-03-30 12:30:39 -06001487
Jens Axboe04ced152018-01-09 08:29:46 -08001488 hctx_lock(hctx, &srcu_idx);
1489 blk_mq_sched_dispatch_requests(hctx);
1490 hctx_unlock(hctx, srcu_idx);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001491}
1492
Ming Leif82ddf12018-04-08 17:48:10 +08001493static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1494{
1495 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1496
1497 if (cpu >= nr_cpu_ids)
1498 cpu = cpumask_first(hctx->cpumask);
1499 return cpu;
1500}
1501
Jens Axboe506e9312014-05-07 10:26:44 -06001502/*
1503 * It'd be great if the workqueue API had a way to pass
1504 * in a mask and had some smarts for more clever placement.
1505 * For now we just round-robin here, switching for every
1506 * BLK_MQ_CPU_WORK_BATCH queued items.
1507 */
1508static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1509{
Ming Lei7bed4592018-01-18 00:41:51 +08001510 bool tried = false;
Ming Lei476f8c92018-04-08 17:48:09 +08001511 int next_cpu = hctx->next_cpu;
Ming Lei7bed4592018-01-18 00:41:51 +08001512
Christoph Hellwigb657d7e2014-11-24 09:27:23 +01001513 if (hctx->queue->nr_hw_queues == 1)
1514 return WORK_CPU_UNBOUND;
Jens Axboe506e9312014-05-07 10:26:44 -06001515
1516 if (--hctx->next_cpu_batch <= 0) {
Ming Lei7bed4592018-01-18 00:41:51 +08001517select_cpu:
Ming Lei476f8c92018-04-08 17:48:09 +08001518 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
Christoph Hellwig20e4d8132018-01-12 10:53:06 +08001519 cpu_online_mask);
Jens Axboe506e9312014-05-07 10:26:44 -06001520 if (next_cpu >= nr_cpu_ids)
Ming Leif82ddf12018-04-08 17:48:10 +08001521 next_cpu = blk_mq_first_mapped_cpu(hctx);
Jens Axboe506e9312014-05-07 10:26:44 -06001522 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1523 }
1524
Ming Lei7bed4592018-01-18 00:41:51 +08001525 /*
 1526 * Schedule the work unbound if we can't find an online CPU for this hctx;
 1527 * this should only happen while handling CPU DEAD.
1528 */
Ming Lei476f8c92018-04-08 17:48:09 +08001529 if (!cpu_online(next_cpu)) {
Ming Lei7bed4592018-01-18 00:41:51 +08001530 if (!tried) {
1531 tried = true;
1532 goto select_cpu;
1533 }
1534
1535 /*
 1536 * Make sure to re-select the CPU next time, once CPUs in
 1537 * hctx->cpumask have come back online.
1538 */
Ming Lei476f8c92018-04-08 17:48:09 +08001539 hctx->next_cpu = next_cpu;
Ming Lei7bed4592018-01-18 00:41:51 +08001540 hctx->next_cpu_batch = 1;
1541 return WORK_CPU_UNBOUND;
1542 }
Ming Lei476f8c92018-04-08 17:48:09 +08001543
1544 hctx->next_cpu = next_cpu;
1545 return next_cpu;
Jens Axboe506e9312014-05-07 10:26:44 -06001546}
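
/*
 * For example, with the usual BLK_MQ_CPU_WORK_BATCH of 8, the run_work is
 * queued on the same CPU for eight consecutive runs before
 * blk_mq_hctx_next_cpu() advances to the next online CPU in hctx->cpumask,
 * wrapping around at the end.
 */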
1547
André Almeida105663f2020-01-06 15:08:18 -03001548/**
1549 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
1550 * @hctx: Pointer to the hardware queue to run.
1551 * @async: If we want to run the queue asynchronously.
Minwoo Imfa94ba82020-12-05 00:20:55 +09001552 * @msecs: Milliseconds of delay to wait before running the queue.
André Almeida105663f2020-01-06 15:08:18 -03001553 *
1554 * If !@async, try to run the queue now. Else, run the queue asynchronously and
1555 * with a delay of @msecs.
1556 */
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001557static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1558 unsigned long msecs)
Jens Axboe320ae512013-10-24 09:20:05 +01001559{
Bart Van Assche5435c022017-06-20 11:15:49 -07001560 if (unlikely(blk_mq_hctx_stopped(hctx)))
Jens Axboe320ae512013-10-24 09:20:05 +01001561 return;
1562
Jens Axboe1b792f22016-09-21 10:12:13 -06001563 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001564 int cpu = get_cpu();
1565 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
Paolo Bonzini398205b2014-11-07 23:03:59 +01001566 __blk_mq_run_hw_queue(hctx);
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001567 put_cpu();
Paolo Bonzini398205b2014-11-07 23:03:59 +01001568 return;
1569 }
Jens Axboee4043dc2014-04-09 10:18:23 -06001570
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001571 put_cpu();
Jens Axboee4043dc2014-04-09 10:18:23 -06001572 }
Paolo Bonzini398205b2014-11-07 23:03:59 +01001573
Bart Van Asscheae943d22018-01-19 08:58:55 -08001574 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1575 msecs_to_jiffies(msecs));
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001576}
1577
André Almeida105663f2020-01-06 15:08:18 -03001578/**
1579 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
1580 * @hctx: Pointer to the hardware queue to run.
Minwoo Imfa94ba82020-12-05 00:20:55 +09001581 * @msecs: Milliseconds of delay to wait before running the queue.
André Almeida105663f2020-01-06 15:08:18 -03001582 *
1583 * Run a hardware queue asynchronously with a delay of @msecs.
1584 */
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001585void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1586{
1587 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1588}
1589EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1590
André Almeida105663f2020-01-06 15:08:18 -03001591/**
1592 * blk_mq_run_hw_queue - Start to run a hardware queue.
1593 * @hctx: Pointer to the hardware queue to run.
1594 * @async: If we want to run the queue asynchronously.
1595 *
1596 * Check if the request queue is not in a quiesced state and if there are
1597 * pending requests to be sent. If this is true, run the queue to send requests
1598 * to hardware.
1599 */
John Garry626fb732019-10-30 00:59:30 +08001600void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001601{
Ming Lei24f5a902018-01-06 16:27:38 +08001602 int srcu_idx;
1603 bool need_run;
1604
1605 /*
 1606 * When the queue is quiesced, we may be switching the I/O scheduler,
 1607 * updating nr_hw_queues, or the like; the queue can't be run any
 1608 * more, and even __blk_mq_hctx_has_pending() can't be called safely.
 1609 *
 1610 * The queue will be rerun by blk_mq_unquiesce_queue() when the
 1611 * quiescing is lifted.
1612 */
Jens Axboe04ced152018-01-09 08:29:46 -08001613 hctx_lock(hctx, &srcu_idx);
1614 need_run = !blk_queue_quiesced(hctx->queue) &&
1615 blk_mq_hctx_has_pending(hctx);
1616 hctx_unlock(hctx, srcu_idx);
Ming Lei24f5a902018-01-06 16:27:38 +08001617
John Garry626fb732019-10-30 00:59:30 +08001618 if (need_run)
Jens Axboe79f720a2017-11-10 09:13:21 -07001619 __blk_mq_delay_run_hw_queue(hctx, async, 0);
Jens Axboe320ae512013-10-24 09:20:05 +01001620}
Omar Sandoval5b727272017-04-14 01:00:00 -07001621EXPORT_SYMBOL(blk_mq_run_hw_queue);
Jens Axboe320ae512013-10-24 09:20:05 +01001622
Jan Karab6e68ee2021-01-11 17:47:17 +01001623/*
1624 * Is the request queue handled by an IO scheduler that does not respect
1625 * hardware queues when dispatching?
1626 */
1627static bool blk_mq_has_sqsched(struct request_queue *q)
1628{
1629 struct elevator_queue *e = q->elevator;
1630
1631 if (e && e->type->ops.dispatch_request &&
1632 !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
1633 return true;
1634 return false;
1635}
1636
1637/*
 1638 * Return the preferred queue to dispatch from (if any) for a non-mq-aware IO
1639 * scheduler.
1640 */
1641static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
1642{
1643 struct blk_mq_hw_ctx *hctx;
1644
1645 /*
1646 * If the IO scheduler does not respect hardware queues when
1647 * dispatching, we just don't bother with multiple HW queues and
1648 * dispatch from hctx for the current CPU since running multiple queues
1649 * just causes lock contention inside the scheduler and pointless cache
1650 * bouncing.
1651 */
1652 hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
1653 raw_smp_processor_id());
1654 if (!blk_mq_hctx_stopped(hctx))
1655 return hctx;
1656 return NULL;
1657}
1658
André Almeida105663f2020-01-06 15:08:18 -03001659/**
Mauro Carvalho Chehab24f7bb82020-10-23 18:32:54 +02001660 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
André Almeida105663f2020-01-06 15:08:18 -03001661 * @q: Pointer to the request queue to run.
1662 * @async: If we want to run the queue asynchronously.
1663 */
Mike Snitzerb94ec292015-03-11 23:56:38 -04001664void blk_mq_run_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001665{
Jan Karab6e68ee2021-01-11 17:47:17 +01001666 struct blk_mq_hw_ctx *hctx, *sq_hctx;
Jens Axboe320ae512013-10-24 09:20:05 +01001667 int i;
1668
Jan Karab6e68ee2021-01-11 17:47:17 +01001669 sq_hctx = NULL;
1670 if (blk_mq_has_sqsched(q))
1671 sq_hctx = blk_mq_get_sq_hctx(q);
Jens Axboe320ae512013-10-24 09:20:05 +01001672 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe79f720a2017-11-10 09:13:21 -07001673 if (blk_mq_hctx_stopped(hctx))
Jens Axboe320ae512013-10-24 09:20:05 +01001674 continue;
Jan Karab6e68ee2021-01-11 17:47:17 +01001675 /*
1676 * Dispatch from this hctx either if there's no hctx preferred
1677 * by IO scheduler or if it has requests that bypass the
1678 * scheduler.
1679 */
1680 if (!sq_hctx || sq_hctx == hctx ||
1681 !list_empty_careful(&hctx->dispatch))
1682 blk_mq_run_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001683 }
1684}
Mike Snitzerb94ec292015-03-11 23:56:38 -04001685EXPORT_SYMBOL(blk_mq_run_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01001686
Bart Van Asschefd001442016-10-28 17:19:37 -07001687/**
Douglas Andersonb9151e72020-04-20 09:24:52 -07001688 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
1689 * @q: Pointer to the request queue to run.
Minwoo Imfa94ba82020-12-05 00:20:55 +09001690 * @msecs: Milliseconds of delay to wait before running the queues.
Douglas Andersonb9151e72020-04-20 09:24:52 -07001691 */
1692void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
1693{
Jan Karab6e68ee2021-01-11 17:47:17 +01001694 struct blk_mq_hw_ctx *hctx, *sq_hctx;
Douglas Andersonb9151e72020-04-20 09:24:52 -07001695 int i;
1696
Jan Karab6e68ee2021-01-11 17:47:17 +01001697 sq_hctx = NULL;
1698 if (blk_mq_has_sqsched(q))
1699 sq_hctx = blk_mq_get_sq_hctx(q);
Douglas Andersonb9151e72020-04-20 09:24:52 -07001700 queue_for_each_hw_ctx(q, hctx, i) {
1701 if (blk_mq_hctx_stopped(hctx))
1702 continue;
Jan Karab6e68ee2021-01-11 17:47:17 +01001703 /*
1704 * Dispatch from this hctx either if there's no hctx preferred
1705 * by IO scheduler or if it has requests that bypass the
1706 * scheduler.
1707 */
1708 if (!sq_hctx || sq_hctx == hctx ||
1709 !list_empty_careful(&hctx->dispatch))
1710 blk_mq_delay_run_hw_queue(hctx, msecs);
Douglas Andersonb9151e72020-04-20 09:24:52 -07001711 }
1712}
1713EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
1714
1715/**
Bart Van Asschefd001442016-10-28 17:19:37 -07001716 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1717 * @q: request queue.
1718 *
1719 * The caller is responsible for serializing this function against
1720 * blk_mq_{start,stop}_hw_queue().
1721 */
1722bool blk_mq_queue_stopped(struct request_queue *q)
1723{
1724 struct blk_mq_hw_ctx *hctx;
1725 int i;
1726
1727 queue_for_each_hw_ctx(q, hctx, i)
1728 if (blk_mq_hctx_stopped(hctx))
1729 return true;
1730
1731 return false;
1732}
1733EXPORT_SYMBOL(blk_mq_queue_stopped);
1734
Ming Lei39a70c72017-06-06 23:22:09 +08001735/*
 1736 * This function is often used by a driver to pause .queue_rq() when
 1737 * there aren't enough resources or some condition isn't satisfied, and
Bart Van Assche4d606212017-08-17 16:23:00 -07001738 * BLK_STS_RESOURCE is usually returned.
Ming Lei39a70c72017-06-06 23:22:09 +08001739 *
1740 * We do not guarantee that dispatch can be drained or blocked
1741 * after blk_mq_stop_hw_queue() returns. Please use
1742 * blk_mq_quiesce_queue() for that requirement.
1743 */
Jens Axboe320ae512013-10-24 09:20:05 +01001744void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1745{
Ming Lei641a9ed2017-06-06 23:22:10 +08001746 cancel_delayed_work(&hctx->run_work);
1747
1748 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboe320ae512013-10-24 09:20:05 +01001749}
1750EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1751
Ming Lei39a70c72017-06-06 23:22:09 +08001752/*
 1753 * This function is often used by a driver to pause .queue_rq() when
 1754 * there aren't enough resources or some condition isn't satisfied, and
Bart Van Assche4d606212017-08-17 16:23:00 -07001755 * BLK_STS_RESOURCE is usually returned.
Ming Lei39a70c72017-06-06 23:22:09 +08001756 *
1757 * We do not guarantee that dispatch can be drained or blocked
1758 * after blk_mq_stop_hw_queues() returns. Please use
1759 * blk_mq_quiesce_queue() for that requirement.
1760 */
Jens Axboe2719aa22017-05-03 11:08:14 -06001761void blk_mq_stop_hw_queues(struct request_queue *q)
1762{
Ming Lei641a9ed2017-06-06 23:22:10 +08001763 struct blk_mq_hw_ctx *hctx;
1764 int i;
1765
1766 queue_for_each_hw_ctx(q, hctx, i)
1767 blk_mq_stop_hw_queue(hctx);
Christoph Hellwig280d45f2013-10-25 14:45:58 +01001768}
1769EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1770
Jens Axboe320ae512013-10-24 09:20:05 +01001771void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1772{
1773 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboee4043dc2014-04-09 10:18:23 -06001774
Jens Axboe0ffbce82014-06-25 08:22:34 -06001775 blk_mq_run_hw_queue(hctx, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001776}
1777EXPORT_SYMBOL(blk_mq_start_hw_queue);
1778
Christoph Hellwig2f268552014-04-16 09:44:56 +02001779void blk_mq_start_hw_queues(struct request_queue *q)
1780{
1781 struct blk_mq_hw_ctx *hctx;
1782 int i;
1783
1784 queue_for_each_hw_ctx(q, hctx, i)
1785 blk_mq_start_hw_queue(hctx);
1786}
1787EXPORT_SYMBOL(blk_mq_start_hw_queues);
1788
Jens Axboeae911c52016-12-08 13:19:30 -07001789void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1790{
1791 if (!blk_mq_hctx_stopped(hctx))
1792 return;
1793
1794 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1795 blk_mq_run_hw_queue(hctx, async);
1796}
1797EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1798
Christoph Hellwig1b4a3252014-04-16 09:44:54 +02001799void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001800{
1801 struct blk_mq_hw_ctx *hctx;
1802 int i;
1803
Jens Axboeae911c52016-12-08 13:19:30 -07001804 queue_for_each_hw_ctx(q, hctx, i)
1805 blk_mq_start_stopped_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001806}
1807EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1808
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001809static void blk_mq_run_work_fn(struct work_struct *work)
Jens Axboe320ae512013-10-24 09:20:05 +01001810{
1811 struct blk_mq_hw_ctx *hctx;
1812
Jens Axboe9f993732017-04-10 09:54:54 -06001813 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
Jens Axboe21c6e932017-04-10 09:54:56 -06001814
1815 /*
Ming Lei15fe8a902018-04-08 17:48:11 +08001816 * If we are stopped, don't run the queue.
Jens Axboe21c6e932017-04-10 09:54:56 -06001817 */
Yufen Yu08410312020-10-08 23:26:30 -04001818 if (blk_mq_hctx_stopped(hctx))
Jianchao Wang0196d6b2018-06-04 17:03:55 +08001819 return;
Jens Axboee4043dc2014-04-09 10:18:23 -06001820
Jens Axboe320ae512013-10-24 09:20:05 +01001821 __blk_mq_run_hw_queue(hctx);
1822}
1823
Ming Leicfd0c552015-10-20 23:13:57 +08001824static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
Ming Leicfd0c552015-10-20 23:13:57 +08001825 struct request *rq,
1826 bool at_head)
Jens Axboe320ae512013-10-24 09:20:05 +01001827{
Jens Axboee57690f2016-08-24 15:34:35 -06001828 struct blk_mq_ctx *ctx = rq->mq_ctx;
Ming Leic16d6b52018-12-17 08:44:05 -07001829 enum hctx_type type = hctx->type;
Jens Axboee57690f2016-08-24 15:34:35 -06001830
Bart Van Assche7b607812017-06-20 11:15:47 -07001831 lockdep_assert_held(&ctx->lock);
1832
Christoph Hellwiga54895f2020-12-03 17:21:39 +01001833 trace_block_rq_insert(rq);
Jens Axboe01b983c2013-11-19 18:59:10 -07001834
Christoph Hellwig72a0a362014-02-07 10:22:36 -08001835 if (at_head)
Ming Leic16d6b52018-12-17 08:44:05 -07001836 list_add(&rq->queuelist, &ctx->rq_lists[type]);
Christoph Hellwig72a0a362014-02-07 10:22:36 -08001837 else
Ming Leic16d6b52018-12-17 08:44:05 -07001838 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
Ming Leicfd0c552015-10-20 23:13:57 +08001839}
Jens Axboe4bb659b2014-05-09 09:36:49 -06001840
Jens Axboe2c3ad662016-12-14 14:34:47 -07001841void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1842 bool at_head)
Ming Leicfd0c552015-10-20 23:13:57 +08001843{
1844 struct blk_mq_ctx *ctx = rq->mq_ctx;
1845
Bart Van Assche7b607812017-06-20 11:15:47 -07001846 lockdep_assert_held(&ctx->lock);
1847
Jens Axboee57690f2016-08-24 15:34:35 -06001848 __blk_mq_insert_req_list(hctx, rq, at_head);
Jens Axboe320ae512013-10-24 09:20:05 +01001849 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001850}
1851
André Almeida105663f2020-01-06 15:08:18 -03001852/**
1853 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
1854 * @rq: Pointer to request to be inserted.
Randy Dunlap26bfeb22020-08-16 16:39:34 -07001855 * @at_head: true if the request should be inserted at the head of the list.
André Almeida105663f2020-01-06 15:08:18 -03001856 * @run_queue: If we should run the hardware queue after inserting the request.
1857 *
Jens Axboe157f3772017-09-11 16:43:57 -06001858 * Should only be used carefully, when the caller knows we want to
1859 * bypass a potential IO scheduler on the target device.
1860 */
Ming Lei01e99ae2020-02-25 09:04:32 +08001861void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
1862 bool run_queue)
Jens Axboe157f3772017-09-11 16:43:57 -06001863{
Jens Axboeea4f9952018-10-29 15:06:13 -06001864 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
Jens Axboe157f3772017-09-11 16:43:57 -06001865
1866 spin_lock(&hctx->lock);
Ming Lei01e99ae2020-02-25 09:04:32 +08001867 if (at_head)
1868 list_add(&rq->queuelist, &hctx->dispatch);
1869 else
1870 list_add_tail(&rq->queuelist, &hctx->dispatch);
Jens Axboe157f3772017-09-11 16:43:57 -06001871 spin_unlock(&hctx->lock);
1872
Ming Leib0850292017-11-02 23:24:34 +08001873 if (run_queue)
1874 blk_mq_run_hw_queue(hctx, false);
Jens Axboe157f3772017-09-11 16:43:57 -06001875}
1876
Jens Axboebd166ef2017-01-17 06:03:22 -07001877void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1878 struct list_head *list)
Jens Axboe320ae512013-10-24 09:20:05 +01001879
1880{
Ming Lei3f0cedc2018-07-02 17:35:58 +08001881 struct request *rq;
Ming Leic16d6b52018-12-17 08:44:05 -07001882 enum hctx_type type = hctx->type;
Ming Lei3f0cedc2018-07-02 17:35:58 +08001883
Jens Axboe320ae512013-10-24 09:20:05 +01001884 /*
 1885 * Preemption doesn't flush the plug list, so it's possible that
 1886 * ctx->cpu is offline by now
1887 */
Ming Lei3f0cedc2018-07-02 17:35:58 +08001888 list_for_each_entry(rq, list, queuelist) {
Jens Axboee57690f2016-08-24 15:34:35 -06001889 BUG_ON(rq->mq_ctx != ctx);
Christoph Hellwiga54895f2020-12-03 17:21:39 +01001890 trace_block_rq_insert(rq);
Jens Axboe320ae512013-10-24 09:20:05 +01001891 }
Ming Lei3f0cedc2018-07-02 17:35:58 +08001892
1893 spin_lock(&ctx->lock);
Ming Leic16d6b52018-12-17 08:44:05 -07001894 list_splice_tail_init(list, &ctx->rq_lists[type]);
Ming Leicfd0c552015-10-20 23:13:57 +08001895 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001896 spin_unlock(&ctx->lock);
Jens Axboe320ae512013-10-24 09:20:05 +01001897}
1898
Sami Tolvanen4f0f5862021-04-08 11:28:34 -07001899static int plug_rq_cmp(void *priv, const struct list_head *a,
1900 const struct list_head *b)
Jens Axboe320ae512013-10-24 09:20:05 +01001901{
1902 struct request *rqa = container_of(a, struct request, queuelist);
1903 struct request *rqb = container_of(b, struct request, queuelist);
1904
Pavel Begunkov7d30a622019-11-29 00:11:53 +03001905 if (rqa->mq_ctx != rqb->mq_ctx)
1906 return rqa->mq_ctx > rqb->mq_ctx;
1907 if (rqa->mq_hctx != rqb->mq_hctx)
1908 return rqa->mq_hctx > rqb->mq_hctx;
Jens Axboe3110fc72018-10-30 12:24:04 -06001909
1910 return blk_rq_pos(rqa) > blk_rq_pos(rqb);
Jens Axboe320ae512013-10-24 09:20:05 +01001911}
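
/*
 * Sorting the plug list by (ctx, hctx, sector) groups requests that share a
 * software and hardware queue, so blk_mq_flush_plug_list() below can hand
 * each group to blk_mq_sched_insert_requests() as a single batch.
 */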
1912
1913void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1914{
Jens Axboe320ae512013-10-24 09:20:05 +01001915 LIST_HEAD(list);
Jens Axboe320ae512013-10-24 09:20:05 +01001916
Pavel Begunkov95ed0c52019-11-29 00:11:55 +03001917 if (list_empty(&plug->mq_list))
1918 return;
Jens Axboe320ae512013-10-24 09:20:05 +01001919 list_splice_init(&plug->mq_list, &list);
1920
Jens Axboece5b0092018-11-27 17:13:56 -07001921 if (plug->rq_count > 2 && plug->multiple_queues)
1922 list_sort(NULL, &list, plug_rq_cmp);
Jens Axboe320ae512013-10-24 09:20:05 +01001923
Dongli Zhangbcc816d2019-04-04 10:57:44 +08001924 plug->rq_count = 0;
1925
Pavel Begunkov95ed0c52019-11-29 00:11:55 +03001926 do {
1927 struct list_head rq_list;
1928 struct request *rq, *head_rq = list_entry_rq(list.next);
1929 struct list_head *pos = &head_rq->queuelist; /* skip first */
1930 struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
1931 struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
1932 unsigned int depth = 1;
Jens Axboe320ae512013-10-24 09:20:05 +01001933
Pavel Begunkov95ed0c52019-11-29 00:11:55 +03001934 list_for_each_continue(pos, &list) {
1935 rq = list_entry_rq(pos);
1936 BUG_ON(!rq->q);
1937 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
1938 break;
1939 depth++;
Jens Axboe320ae512013-10-24 09:20:05 +01001940 }
1941
Pavel Begunkov95ed0c52019-11-29 00:11:55 +03001942 list_cut_before(&rq_list, &list, pos);
1943 trace_block_unplug(head_rq->q, depth, !from_schedule);
Jens Axboe67cae4c2018-10-30 11:31:51 -06001944 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
Jens Axboebd166ef2017-01-17 06:03:22 -07001945 from_schedule);
Pavel Begunkov95ed0c52019-11-29 00:11:55 +03001946 } while(!list_empty(&list));
Jens Axboe320ae512013-10-24 09:20:05 +01001947}
1948
Christoph Hellwig14ccb662019-06-06 12:29:01 +02001949static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
1950 unsigned int nr_segs)
Jens Axboe320ae512013-10-24 09:20:05 +01001951{
Eric Biggers93f221a2020-09-15 20:53:14 -07001952 int err;
1953
Christoph Hellwigf924cdd2019-06-06 12:29:00 +02001954 if (bio->bi_opf & REQ_RAHEAD)
1955 rq->cmd_flags |= REQ_FAILFAST_MASK;
1956
1957 rq->__sector = bio->bi_iter.bi_sector;
1958 rq->write_hint = bio->bi_write_hint;
Christoph Hellwig14ccb662019-06-06 12:29:01 +02001959 blk_rq_bio_prep(rq, bio, nr_segs);
Eric Biggers93f221a2020-09-15 20:53:14 -07001960
1961 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
1962 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
1963 WARN_ON_ONCE(err);
Jens Axboe4b570522014-05-29 11:00:11 -06001964
Konstantin Khlebnikovb5af37a2020-05-27 07:24:16 +02001965 blk_account_io_start(rq);
Jens Axboe320ae512013-10-24 09:20:05 +01001966}
1967
Mike Snitzer0f955492018-01-17 11:25:56 -05001968static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1969 struct request *rq,
Jens Axboebe94f052018-11-24 10:15:46 -07001970 blk_qc_t *cookie, bool last)
Shaohua Lif984df12015-05-08 10:51:32 -07001971{
Shaohua Lif984df12015-05-08 10:51:32 -07001972 struct request_queue *q = rq->q;
Shaohua Lif984df12015-05-08 10:51:32 -07001973 struct blk_mq_queue_data bd = {
1974 .rq = rq,
Jens Axboebe94f052018-11-24 10:15:46 -07001975 .last = last,
Shaohua Lif984df12015-05-08 10:51:32 -07001976 };
Jens Axboebd166ef2017-01-17 06:03:22 -07001977 blk_qc_t new_cookie;
Jens Axboef06345a2017-06-12 11:22:46 -06001978 blk_status_t ret;
Mike Snitzer0f955492018-01-17 11:25:56 -05001979
1980 new_cookie = request_to_qc_t(hctx, rq);
1981
1982 /*
1983 * For OK queue, we are done. For error, caller may kill it.
1984 * Any other error (busy), just add it to our list as we
1985 * previously would have done.
1986 */
1987 ret = q->mq_ops->queue_rq(hctx, &bd);
1988 switch (ret) {
1989 case BLK_STS_OK:
Ming Lei6ce3dd62018-07-10 09:03:31 +08001990 blk_mq_update_dispatch_busy(hctx, false);
Mike Snitzer0f955492018-01-17 11:25:56 -05001991 *cookie = new_cookie;
1992 break;
1993 case BLK_STS_RESOURCE:
Ming Lei86ff7c22018-01-30 22:04:57 -05001994 case BLK_STS_DEV_RESOURCE:
Ming Lei6ce3dd62018-07-10 09:03:31 +08001995 blk_mq_update_dispatch_busy(hctx, true);
Mike Snitzer0f955492018-01-17 11:25:56 -05001996 __blk_mq_requeue_request(rq);
1997 break;
1998 default:
Ming Lei6ce3dd62018-07-10 09:03:31 +08001999 blk_mq_update_dispatch_busy(hctx, false);
Mike Snitzer0f955492018-01-17 11:25:56 -05002000 *cookie = BLK_QC_T_NONE;
2001 break;
2002 }
2003
2004 return ret;
2005}
2006
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002007static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
Mike Snitzer0f955492018-01-17 11:25:56 -05002008 struct request *rq,
Ming Lei396eaf22018-01-17 11:25:57 -05002009 blk_qc_t *cookie,
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002010 bool bypass_insert, bool last)
Mike Snitzer0f955492018-01-17 11:25:56 -05002011{
2012 struct request_queue *q = rq->q;
Ming Leid964f042017-06-06 23:22:00 +08002013 bool run_queue = true;
Ming Lei2a5a24a2021-01-22 10:33:12 +08002014 int budget_token;
Ming Leid964f042017-06-06 23:22:00 +08002015
Ming Lei23d4ee12018-01-18 12:06:59 +08002016 /*
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002017 * An RCU or SRCU read lock is needed before checking the quiesced flag.
Ming Lei23d4ee12018-01-18 12:06:59 +08002018 *
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002019 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
 2020 * blk_mq_request_issue_directly(), return BLK_STS_OK to the caller,
 2021 * and avoid having the driver try to dispatch again.
Ming Lei23d4ee12018-01-18 12:06:59 +08002022 */
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002023 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
Ming Leid964f042017-06-06 23:22:00 +08002024 run_queue = false;
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002025 bypass_insert = false;
2026 goto insert;
Ming Leid964f042017-06-06 23:22:00 +08002027 }
Shaohua Lif984df12015-05-08 10:51:32 -07002028
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002029 if (q->elevator && !bypass_insert)
2030 goto insert;
Bart Van Assche2253efc2016-10-28 17:20:02 -07002031
Ming Lei2a5a24a2021-01-22 10:33:12 +08002032 budget_token = blk_mq_get_dispatch_budget(q);
2033 if (budget_token < 0)
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002034 goto insert;
Jens Axboebd166ef2017-01-17 06:03:22 -07002035
Ming Lei2a5a24a2021-01-22 10:33:12 +08002036 blk_mq_set_rq_budget_token(rq, budget_token);
2037
Ming Lei8ab6bb9e2018-06-25 19:31:45 +08002038 if (!blk_mq_get_driver_tag(rq)) {
Ming Lei2a5a24a2021-01-22 10:33:12 +08002039 blk_mq_put_dispatch_budget(q, budget_token);
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002040 goto insert;
Ming Lei88022d72017-11-05 02:21:12 +08002041 }
Ming Leide148292017-10-14 17:22:29 +08002042
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002043 return __blk_mq_issue_directly(hctx, rq, cookie, last);
2044insert:
2045 if (bypass_insert)
2046 return BLK_STS_RESOURCE;
2047
Ming Leidb03f882020-08-18 17:07:28 +08002048 blk_mq_sched_insert_request(rq, false, run_queue, false);
2049
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002050 return BLK_STS_OK;
2051}
2052
André Almeida105663f2020-01-06 15:08:18 -03002053/**
2054 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2055 * @hctx: Pointer of the associated hardware queue.
2056 * @rq: Pointer to request to be sent.
2057 * @cookie: Request queue cookie.
2058 *
2059 * If the device has enough resources to accept a new request now, send the
 2060 * request directly to the device driver. Else, insert it into the hctx->dispatch
 2061 * list, so we can try to send it again later. Requests inserted into this
 2062 * list have higher priority.
2063 */
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002064static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2065 struct request *rq, blk_qc_t *cookie)
2066{
2067 blk_status_t ret;
2068 int srcu_idx;
2069
2070 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
2071
2072 hctx_lock(hctx, &srcu_idx);
2073
2074 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
2075 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
Ming Lei01e99ae2020-02-25 09:04:32 +08002076 blk_mq_request_bypass_insert(rq, false, true);
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002077 else if (ret != BLK_STS_OK)
2078 blk_mq_end_request(rq, ret);
2079
Jens Axboe04ced152018-01-09 08:29:46 -08002080 hctx_unlock(hctx, srcu_idx);
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002081}
2082
2083blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2084{
2085 blk_status_t ret;
2086 int srcu_idx;
2087 blk_qc_t unused_cookie;
2088 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2089
2090 hctx_lock(hctx, &srcu_idx);
2091 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
2092 hctx_unlock(hctx, srcu_idx);
Jianchao Wang7f556a42018-12-14 09:28:18 +08002093
2094 return ret;
Christoph Hellwig5eb61262017-03-22 15:01:51 -04002095}
2096
Ming Lei6ce3dd62018-07-10 09:03:31 +08002097void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2098 struct list_head *list)
2099{
Keith Busch536167d42020-04-07 03:13:48 +09002100 int queued = 0;
yangerkun632bfb62020-09-05 19:25:56 +08002101 int errors = 0;
Keith Busch536167d42020-04-07 03:13:48 +09002102
Ming Lei6ce3dd62018-07-10 09:03:31 +08002103 while (!list_empty(list)) {
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002104 blk_status_t ret;
Ming Lei6ce3dd62018-07-10 09:03:31 +08002105 struct request *rq = list_first_entry(list, struct request,
2106 queuelist);
2107
2108 list_del_init(&rq->queuelist);
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002109 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2110 if (ret != BLK_STS_OK) {
2111 if (ret == BLK_STS_RESOURCE ||
2112 ret == BLK_STS_DEV_RESOURCE) {
Ming Lei01e99ae2020-02-25 09:04:32 +08002113 blk_mq_request_bypass_insert(rq, false,
Jens Axboec616cbe2018-12-06 22:17:44 -07002114 list_empty(list));
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002115 break;
2116 }
2117 blk_mq_end_request(rq, ret);
yangerkun632bfb62020-09-05 19:25:56 +08002118 errors++;
Keith Busch536167d42020-04-07 03:13:48 +09002119 } else
2120 queued++;
Ming Lei6ce3dd62018-07-10 09:03:31 +08002121 }
Jens Axboed666ba92018-11-27 17:02:25 -07002122
2123 /*
2124 * If we didn't flush the entire list, we could have told
2125 * the driver there was more coming, but that turned out to
2126 * be a lie.
2127 */
yangerkun632bfb62020-09-05 19:25:56 +08002128 if ((!list_empty(list) || errors) &&
2129 hctx->queue->mq_ops->commit_rqs && queued)
Jens Axboed666ba92018-11-27 17:02:25 -07002130 hctx->queue->mq_ops->commit_rqs(hctx);
Ming Lei6ce3dd62018-07-10 09:03:31 +08002131}
2132
Jens Axboece5b0092018-11-27 17:13:56 -07002133static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
2134{
2135 list_add_tail(&rq->queuelist, &plug->mq_list);
2136 plug->rq_count++;
2137 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
2138 struct request *tmp;
2139
2140 tmp = list_first_entry(&plug->mq_list, struct request,
2141 queuelist);
2142 if (tmp->q != rq->q)
2143 plug->multiple_queues = true;
2144 }
2145}
2146
Song Liu7f2a6a62021-09-07 16:03:38 -07002147/*
2148 * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
2149 * queues. This is important for md arrays to benefit from merging
2150 * requests.
2151 */
2152static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
2153{
2154 if (plug->multiple_queues)
2155 return BLK_MAX_REQUEST_COUNT * 4;
2156 return BLK_MAX_REQUEST_COUNT;
2157}
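
/*
 * With BLK_MAX_REQUEST_COUNT at its usual value of 16, a plug spanning
 * multiple queues (e.g. an md array built from several devices) may
 * therefore hold up to 64 requests before blk_mq_submit_bio() flushes it.
 */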
2158
André Almeida105663f2020-01-06 15:08:18 -03002159/**
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02002160 * blk_mq_submit_bio - Create and send a request to block device.
André Almeida105663f2020-01-06 15:08:18 -03002161 * @bio: Bio pointer.
2162 *
 2163 * Builds up a request structure from @bio and sends it to the device. The
2164 * request may not be queued directly to hardware if:
2165 * * This request can be merged with another one
 2166 * * We want to place the request on the plug list for possible future merging
2167 * * There is an IO scheduler active at this queue
2168 *
2169 * It will not queue the request if there is an error with the bio, or at the
2170 * request creation.
2171 *
2172 * Returns: Request queue cookie.
2173 */
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02002174blk_qc_t blk_mq_submit_bio(struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06002175{
Christoph Hellwig309dca302021-01-24 11:02:34 +01002176 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
Christoph Hellwigef295ec2016-10-28 08:48:16 -06002177 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07002178 const int is_flush_fua = op_is_flush(bio->bi_opf);
Christoph Hellwige6e7abf2020-05-29 15:53:09 +02002179 struct blk_mq_alloc_data data = {
2180 .q = q,
2181 };
Jens Axboe07068d52014-05-22 10:40:51 -06002182 struct request *rq;
Shaohua Lif984df12015-05-08 10:51:32 -07002183 struct blk_plug *plug;
Shaohua Li5b3f3412015-05-08 10:51:33 -07002184 struct request *same_queue_rq = NULL;
Christoph Hellwig14ccb662019-06-06 12:29:01 +02002185 unsigned int nr_segs;
Jens Axboe7b371632015-11-05 10:41:40 -07002186 blk_qc_t cookie;
Satya Tangiralaa892c8d2020-05-14 00:37:18 +00002187 blk_status_t ret;
Jeffle Xucc29e1b2020-11-26 17:18:52 +08002188 bool hipri;
Jens Axboe07068d52014-05-22 10:40:51 -06002189
2190 blk_queue_bounce(q, &bio);
Christoph Hellwigf695ca32020-07-01 10:59:39 +02002191 __blk_queue_split(&bio, &nr_segs);
Wen Xiongf36ea502017-05-10 08:54:11 -05002192
Dmitry Monakhove23947b2017-06-29 11:31:11 -07002193 if (!bio_integrity_prep(bio))
Christoph Hellwigac7c5672020-05-16 20:28:01 +02002194 goto queue_exit;
Jens Axboe07068d52014-05-22 10:40:51 -06002195
Omar Sandoval87c279e2016-06-01 22:18:48 -07002196 if (!is_flush_fua && !blk_queue_nomerges(q) &&
Christoph Hellwig14ccb662019-06-06 12:29:01 +02002197 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
Christoph Hellwigac7c5672020-05-16 20:28:01 +02002198 goto queue_exit;
Shaohua Lif984df12015-05-08 10:51:32 -07002199
Christoph Hellwig14ccb662019-06-06 12:29:01 +02002200 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
Christoph Hellwigac7c5672020-05-16 20:28:01 +02002201 goto queue_exit;
Jens Axboebd166ef2017-01-17 06:03:22 -07002202
Christoph Hellwigd5337562018-11-14 17:02:09 +01002203 rq_qos_throttle(q, bio);
Jens Axboe87760e52016-11-09 12:38:14 -07002204
Jeffle Xucc29e1b2020-11-26 17:18:52 +08002205 hipri = bio->bi_opf & REQ_HIPRI;
2206
Ming Lei78091672019-01-16 19:08:15 +08002207 data.cmd_flags = bio->bi_opf;
Christoph Hellwige6e7abf2020-05-29 15:53:09 +02002208 rq = __blk_mq_alloc_request(&data);
Jens Axboe87760e52016-11-09 12:38:14 -07002209 if (unlikely(!rq)) {
Josef Bacikc1c80382018-07-03 11:14:59 -04002210 rq_qos_cleanup(q, bio);
Jens Axboe7b6620d2019-08-15 11:09:16 -06002211 if (bio->bi_opf & REQ_NOWAIT)
Goldwyn Rodrigues03a07c92017-06-20 07:05:46 -05002212 bio_wouldblock_error(bio);
Christoph Hellwigac7c5672020-05-16 20:28:01 +02002213 goto queue_exit;
Jens Axboe87760e52016-11-09 12:38:14 -07002214 }
2215
Christoph Hellwige8a676d2020-12-03 17:21:36 +01002216 trace_block_getrq(bio);
Xiaoguang Wangd6f1dda2018-10-23 22:30:50 +08002217
Josef Bacikc1c80382018-07-03 11:14:59 -04002218 rq_qos_track(q, rq, bio);
Jens Axboe07068d52014-05-22 10:40:51 -06002219
Jens Axboefd2d3322017-01-12 10:04:45 -07002220 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06002221
Bart Van Assche970d1682019-07-01 08:47:30 -07002222 blk_mq_bio_to_request(rq, bio, nr_segs);
2223
Satya Tangiralaa892c8d2020-05-14 00:37:18 +00002224 ret = blk_crypto_init_request(rq);
2225 if (ret != BLK_STS_OK) {
2226 bio->bi_status = ret;
2227 bio_endio(bio);
2228 blk_mq_free_request(rq);
2229 return BLK_QC_T_NONE;
2230 }
2231
Damien Le Moalb49773e72019-07-11 01:18:31 +09002232 plug = blk_mq_plug(q, bio);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04002233 if (unlikely(is_flush_fua)) {
André Almeida105663f2020-01-06 15:08:18 -03002234 /* Bypass scheduler for flush requests */
Ming Lei923218f2017-11-02 23:24:38 +08002235 blk_insert_flush(rq);
2236 blk_mq_run_hw_queue(data.hctx, true);
Ming Lei03f26d82021-05-14 10:20:52 +08002237 } else if (plug && (q->nr_hw_queues == 1 ||
John Garry079a2e32021-10-05 18:23:39 +08002238 blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
Ming Lei03f26d82021-05-14 10:20:52 +08002239 q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
Jens Axboeb2c5d162018-11-29 10:03:42 -07002240 /*
2241 * Use plugging if we have a ->commit_rqs() hook as well, as
2242 * we know the driver uses bd->last in a smart fashion.
Ming Lei3154df22019-09-27 15:24:31 +08002243 *
 2244 * Use normal plugging if this disk is a slow (rotational) HDD, as sequential
2245 * IO may benefit a lot from plug merging.
Jens Axboeb2c5d162018-11-29 10:03:42 -07002246 */
Jens Axboe5f0ed772018-11-23 22:04:33 -07002247 unsigned int request_count = plug->rq_count;
Shaohua Li600271d2016-11-03 17:03:54 -07002248 struct request *last = NULL;
2249
Ming Lei676d0602015-10-20 23:13:56 +08002250 if (!request_count)
Jeff Moyere6c44382015-05-08 10:51:30 -07002251 trace_block_plug(q);
Shaohua Li600271d2016-11-03 17:03:54 -07002252 else
2253 last = list_entry_rq(plug->mq_list.prev);
Jens Axboeb094f892015-11-20 20:29:45 -07002254
Song Liu7f2a6a62021-09-07 16:03:38 -07002255 if (request_count >= blk_plug_max_rq_count(plug) || (last &&
Shaohua Li600271d2016-11-03 17:03:54 -07002256 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
Jeff Moyere6c44382015-05-08 10:51:30 -07002257 blk_flush_plug_list(plug, false);
2258 trace_block_plug(q);
Jens Axboe320ae512013-10-24 09:20:05 +01002259 }
Jens Axboeb094f892015-11-20 20:29:45 -07002260
Jens Axboece5b0092018-11-27 17:13:56 -07002261 blk_add_rq_to_plug(plug, rq);
Ming Leia12de1d2019-09-27 15:24:30 +08002262 } else if (q->elevator) {
André Almeida105663f2020-01-06 15:08:18 -03002263 /* Insert the request at the IO scheduler queue */
Ming Leia12de1d2019-09-27 15:24:30 +08002264 blk_mq_sched_insert_request(rq, false, true, true);
Christoph Hellwig22997222017-03-22 15:01:52 -04002265 } else if (plug && !blk_queue_nomerges(q)) {
Jens Axboe320ae512013-10-24 09:20:05 +01002266 /*
2267 * We do limited plugging. If the bio can be merged, do that.
2268 * Otherwise the existing request in the plug list will be
 2269 * issued. So the plug list will have one request at most.
Christoph Hellwig22997222017-03-22 15:01:52 -04002270 * The plug list might get flushed before this. If that happens,
2271 * the plug list is empty, and same_queue_rq is invalid.
Jens Axboe320ae512013-10-24 09:20:05 +01002272 */
Christoph Hellwig22997222017-03-22 15:01:52 -04002273 if (list_empty(&plug->mq_list))
2274 same_queue_rq = NULL;
Jens Axboe4711b572018-11-27 17:07:17 -07002275 if (same_queue_rq) {
Christoph Hellwig22997222017-03-22 15:01:52 -04002276 list_del_init(&same_queue_rq->queuelist);
Jens Axboe4711b572018-11-27 17:07:17 -07002277 plug->rq_count--;
2278 }
Jens Axboece5b0092018-11-27 17:13:56 -07002279 blk_add_rq_to_plug(plug, rq);
Yufen Yuff3b74b2019-03-26 21:19:25 +08002280 trace_block_plug(q);
Christoph Hellwig22997222017-03-22 15:01:52 -04002281
Ming Leidad7a3b2017-06-06 23:21:59 +08002282 if (same_queue_rq) {
Jens Axboeea4f9952018-10-29 15:06:13 -06002283 data.hctx = same_queue_rq->mq_hctx;
Yufen Yuff3b74b2019-03-26 21:19:25 +08002284 trace_block_unplug(q, 1, true);
Christoph Hellwig22997222017-03-22 15:01:52 -04002285 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002286 &cookie);
Ming Leidad7a3b2017-06-06 23:21:59 +08002287 }
Ming Leia12de1d2019-09-27 15:24:30 +08002288 } else if ((q->nr_hw_queues > 1 && is_sync) ||
2289 !data.hctx->dispatch_busy) {
André Almeida105663f2020-01-06 15:08:18 -03002290 /*
2291 * There is no scheduler and we can try to send directly
2292 * to the hardware.
2293 */
Bart Van Asschefd9c40f2019-04-04 10:08:43 -07002294 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
Ming Leiab42f352017-05-26 19:53:19 +08002295 } else {
André Almeida105663f2020-01-06 15:08:18 -03002296 /* Default case. */
huhai8fa9f552018-05-16 08:21:21 -06002297 blk_mq_sched_insert_request(rq, false, true, true);
Ming Leiab42f352017-05-26 19:53:19 +08002298 }
Jens Axboe320ae512013-10-24 09:20:05 +01002299
Jeffle Xucc29e1b2020-11-26 17:18:52 +08002300 if (!hipri)
2301 return BLK_QC_T_NONE;
Jens Axboe7b371632015-11-05 10:41:40 -07002302 return cookie;
Christoph Hellwigac7c5672020-05-16 20:28:01 +02002303queue_exit:
2304 blk_queue_exit(q);
2305 return BLK_QC_T_NONE;
Jens Axboe320ae512013-10-24 09:20:05 +01002306}
2307
Ming Leibd631412021-05-11 23:22:35 +08002308static size_t order_to_size(unsigned int order)
2309{
2310 return (size_t)PAGE_SIZE << order;
2311}
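/*
 * For example, with the common 4 KiB PAGE_SIZE, order 0 corresponds to a
 * 4 KiB allocation and the max_order of 4 used in blk_mq_alloc_rqs() below
 * corresponds to 64 KiB per allocation.
 */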
2312
 2313/* Called before freeing the request pool in @tags to clear stale references in @drv_tags->rqs[] */
John Garryf32e4ea2021-10-05 18:23:32 +08002314static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
2315 struct blk_mq_tags *tags)
Ming Leibd631412021-05-11 23:22:35 +08002316{
Ming Leibd631412021-05-11 23:22:35 +08002317 struct page *page;
2318 unsigned long flags;
2319
John Garry4f245d52021-10-05 18:23:33 +08002320	/* There is no need to clear the driver tags' own mapping */
2321 if (drv_tags == tags)
2322 return;
2323
Ming Leibd631412021-05-11 23:22:35 +08002324 list_for_each_entry(page, &tags->page_list, lru) {
2325 unsigned long start = (unsigned long)page_address(page);
2326 unsigned long end = start + order_to_size(page->private);
2327 int i;
2328
John Garryf32e4ea2021-10-05 18:23:32 +08002329 for (i = 0; i < drv_tags->nr_tags; i++) {
Ming Leibd631412021-05-11 23:22:35 +08002330 struct request *rq = drv_tags->rqs[i];
2331 unsigned long rq_addr = (unsigned long)rq;
2332
2333 if (rq_addr >= start && rq_addr < end) {
2334 WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
2335 cmpxchg(&drv_tags->rqs[i], rq, NULL);
2336 }
2337 }
2338 }
2339
2340 /*
 2341	 * Wait until all pending iterations are done.
2342 *
2343 * Request reference is cleared and it is guaranteed to be observed
2344 * after the ->lock is released.
2345 */
2346 spin_lock_irqsave(&drv_tags->lock, flags);
2347 spin_unlock_irqrestore(&drv_tags->lock, flags);
2348}
2349
Jens Axboecc71a6f2017-01-11 14:29:56 -07002350void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2351 unsigned int hctx_idx)
Jens Axboe320ae512013-10-24 09:20:05 +01002352{
John Garryf32e4ea2021-10-05 18:23:32 +08002353 struct blk_mq_tags *drv_tags;
Jens Axboe320ae512013-10-24 09:20:05 +01002354 struct page *page;
2355
John Garry079a2e32021-10-05 18:23:39 +08002356 if (blk_mq_is_shared_tags(set->flags))
2357 drv_tags = set->shared_tags;
John Garrye155b0c2021-10-05 18:23:37 +08002358 else
2359 drv_tags = set->tags[hctx_idx];
John Garryf32e4ea2021-10-05 18:23:32 +08002360
John Garry65de57b2021-10-05 18:23:26 +08002361 if (tags->static_rqs && set->ops->exit_request) {
Christoph Hellwige9b267d2014-04-15 13:59:10 -06002362 int i;
2363
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002364 for (i = 0; i < tags->nr_tags; i++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07002365 struct request *rq = tags->static_rqs[i];
2366
2367 if (!rq)
Christoph Hellwige9b267d2014-04-15 13:59:10 -06002368 continue;
Christoph Hellwigd6296d392017-05-01 10:19:08 -06002369 set->ops->exit_request(set, rq, hctx_idx);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07002370 tags->static_rqs[i] = NULL;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06002371 }
2372 }
2373
John Garryf32e4ea2021-10-05 18:23:32 +08002374 blk_mq_clear_rq_mapping(drv_tags, tags);
Ming Leibd631412021-05-11 23:22:35 +08002375
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002376 while (!list_empty(&tags->page_list)) {
2377 page = list_first_entry(&tags->page_list, struct page, lru);
Dave Hansen67534712014-01-08 20:17:46 -07002378 list_del_init(&page->lru);
Catalin Marinasf75782e2015-09-14 18:16:02 +01002379 /*
2380 * Remove kmemleak object previously allocated in
Raul E Rangel273938b2019-05-02 13:48:11 -06002381 * blk_mq_alloc_rqs().
Catalin Marinasf75782e2015-09-14 18:16:02 +01002382 */
2383 kmemleak_free(page_address(page));
Jens Axboe320ae512013-10-24 09:20:05 +01002384 __free_pages(page, page->private);
2385 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002386}
Jens Axboe320ae512013-10-24 09:20:05 +01002387
John Garrye155b0c2021-10-05 18:23:37 +08002388void blk_mq_free_rq_map(struct blk_mq_tags *tags)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002389{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002390 kfree(tags->rqs);
Jens Axboecc71a6f2017-01-11 14:29:56 -07002391 tags->rqs = NULL;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07002392 kfree(tags->static_rqs);
2393 tags->static_rqs = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002394
John Garrye155b0c2021-10-05 18:23:37 +08002395 blk_mq_free_tags(tags);
Jens Axboe320ae512013-10-24 09:20:05 +01002396}
2397
John Garry63064be2021-10-05 18:23:35 +08002398static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2399 unsigned int hctx_idx,
2400 unsigned int nr_tags,
John Garrye155b0c2021-10-05 18:23:37 +08002401 unsigned int reserved_tags)
Jens Axboe320ae512013-10-24 09:20:05 +01002402{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002403 struct blk_mq_tags *tags;
Shaohua Li59f082e2017-02-01 09:53:14 -08002404 int node;
Jens Axboe320ae512013-10-24 09:20:05 +01002405
Dongli Zhang7d76f852019-02-27 21:35:01 +08002406 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
Shaohua Li59f082e2017-02-01 09:53:14 -08002407 if (node == NUMA_NO_NODE)
2408 node = set->numa_node;
2409
John Garrye155b0c2021-10-05 18:23:37 +08002410 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2411 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002412 if (!tags)
2413 return NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002414
Kees Cook590b5b72018-06-12 14:04:20 -07002415 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02002416 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08002417 node);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002418 if (!tags->rqs) {
John Garrye155b0c2021-10-05 18:23:37 +08002419 blk_mq_free_tags(tags);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002420 return NULL;
2421 }
Jens Axboe320ae512013-10-24 09:20:05 +01002422
Kees Cook590b5b72018-06-12 14:04:20 -07002423 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2424 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2425 node);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07002426 if (!tags->static_rqs) {
2427 kfree(tags->rqs);
John Garrye155b0c2021-10-05 18:23:37 +08002428 blk_mq_free_tags(tags);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07002429 return NULL;
2430 }
2431
Jens Axboecc71a6f2017-01-11 14:29:56 -07002432 return tags;
2433}
2434
Tejun Heo1d9bd512018-01-09 08:29:48 -08002435static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2436 unsigned int hctx_idx, int node)
2437{
2438 int ret;
2439
2440 if (set->ops->init_request) {
2441 ret = set->ops->init_request(set, rq, hctx_idx, node);
2442 if (ret)
2443 return ret;
2444 }
2445
Keith Busch12f5b932018-05-29 15:52:28 +02002446 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
Tejun Heo1d9bd512018-01-09 08:29:48 -08002447 return 0;
2448}
2449
John Garry63064be2021-10-05 18:23:35 +08002450static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
2451 struct blk_mq_tags *tags,
2452 unsigned int hctx_idx, unsigned int depth)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002453{
2454 unsigned int i, j, entries_per_page, max_order = 4;
2455 size_t rq_size, left;
Shaohua Li59f082e2017-02-01 09:53:14 -08002456 int node;
2457
Dongli Zhang7d76f852019-02-27 21:35:01 +08002458 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
Shaohua Li59f082e2017-02-01 09:53:14 -08002459 if (node == NUMA_NO_NODE)
2460 node = set->numa_node;
Jens Axboecc71a6f2017-01-11 14:29:56 -07002461
2462 INIT_LIST_HEAD(&tags->page_list);
2463
Jens Axboe320ae512013-10-24 09:20:05 +01002464 /*
2465 * rq_size is the size of the request plus driver payload, rounded
2466 * to the cacheline size
2467 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002468 rq_size = round_up(sizeof(struct request) + set->cmd_size,
Jens Axboe320ae512013-10-24 09:20:05 +01002469 cache_line_size());
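	/*
	 * Worked example with illustrative numbers: if sizeof(struct request)
	 * plus set->cmd_size rounds up to 512 bytes on a 64-byte cache line,
	 * an order-2 allocation (16 KiB) holds 32 requests per page block.
	 */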
Jens Axboecc71a6f2017-01-11 14:29:56 -07002470 left = rq_size * depth;
Jens Axboe320ae512013-10-24 09:20:05 +01002471
Jens Axboecc71a6f2017-01-11 14:29:56 -07002472 for (i = 0; i < depth; ) {
Jens Axboe320ae512013-10-24 09:20:05 +01002473 int this_order = max_order;
2474 struct page *page;
2475 int to_do;
2476 void *p;
2477
Bartlomiej Zolnierkiewiczb3a834b2016-05-16 09:54:47 -06002478 while (this_order && left < order_to_size(this_order - 1))
Jens Axboe320ae512013-10-24 09:20:05 +01002479 this_order--;
2480
2481 do {
Shaohua Li59f082e2017-02-01 09:53:14 -08002482 page = alloc_pages_node(node,
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02002483 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
Jens Axboea5164402014-09-10 09:02:03 -06002484 this_order);
Jens Axboe320ae512013-10-24 09:20:05 +01002485 if (page)
2486 break;
2487 if (!this_order--)
2488 break;
2489 if (order_to_size(this_order) < rq_size)
2490 break;
2491 } while (1);
2492
2493 if (!page)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002494 goto fail;
Jens Axboe320ae512013-10-24 09:20:05 +01002495
2496 page->private = this_order;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002497 list_add_tail(&page->lru, &tags->page_list);
Jens Axboe320ae512013-10-24 09:20:05 +01002498
2499 p = page_address(page);
Catalin Marinasf75782e2015-09-14 18:16:02 +01002500 /*
2501 * Allow kmemleak to scan these pages as they contain pointers
 2502	 * to additional allocations, e.g. those made via ops->init_request().
2503 */
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02002504 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
Jens Axboe320ae512013-10-24 09:20:05 +01002505 entries_per_page = order_to_size(this_order) / rq_size;
Jens Axboecc71a6f2017-01-11 14:29:56 -07002506 to_do = min(entries_per_page, depth - i);
Jens Axboe320ae512013-10-24 09:20:05 +01002507 left -= to_do * rq_size;
2508 for (j = 0; j < to_do; j++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07002509 struct request *rq = p;
2510
2511 tags->static_rqs[i] = rq;
Tejun Heo1d9bd512018-01-09 08:29:48 -08002512 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2513 tags->static_rqs[i] = NULL;
2514 goto fail;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06002515 }
2516
Jens Axboe320ae512013-10-24 09:20:05 +01002517 p += rq_size;
2518 i++;
2519 }
2520 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002521 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01002522
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002523fail:
Jens Axboecc71a6f2017-01-11 14:29:56 -07002524 blk_mq_free_rqs(set, tags, hctx_idx);
2525 return -ENOMEM;
Jens Axboe320ae512013-10-24 09:20:05 +01002526}
2527
Ming Leibf0beec2020-05-29 15:53:15 +02002528struct rq_iter_data {
2529 struct blk_mq_hw_ctx *hctx;
2530 bool has_rq;
2531};
2532
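/*
 * blk_mq_all_tag_iter() callback: returning false stops the walk, so the
 * iteration ends as soon as one request belonging to the target hctx is seen.
 */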
2533static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
2534{
2535 struct rq_iter_data *iter_data = data;
2536
2537 if (rq->mq_hctx != iter_data->hctx)
2538 return true;
2539 iter_data->has_rq = true;
2540 return false;
2541}
2542
2543static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
2544{
2545 struct blk_mq_tags *tags = hctx->sched_tags ?
2546 hctx->sched_tags : hctx->tags;
2547 struct rq_iter_data data = {
2548 .hctx = hctx,
2549 };
2550
2551 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
2552 return data.has_rq;
2553}
2554
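/*
 * Return true only if @cpu is the last online CPU in @hctx->cpumask: it must
 * be the first online CPU in the mask, with no further online CPU after it.
 */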
2555static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
2556 struct blk_mq_hw_ctx *hctx)
2557{
2558 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
2559 return false;
2560 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
2561 return false;
2562 return true;
2563}
2564
2565static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
2566{
2567 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2568 struct blk_mq_hw_ctx, cpuhp_online);
2569
2570 if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
2571 !blk_mq_last_cpu_in_hctx(cpu, hctx))
2572 return 0;
2573
2574 /*
 2575	 * Prevent new requests from being allocated on the current hctx.
2576 *
 2577	 * The smp_mb__after_atomic() pairs with the implied barrier in
 2578	 * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag is
 2579	 * seen once we return from the tag allocator.
2580 */
2581 set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2582 smp_mb__after_atomic();
2583
2584 /*
2585 * Try to grab a reference to the queue and wait for any outstanding
 2586	 * requests. If we could not grab a reference, the queue has been
2587 * frozen and there are no requests.
2588 */
2589 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
2590 while (blk_mq_hctx_has_requests(hctx))
2591 msleep(5);
2592 percpu_ref_put(&hctx->queue->q_usage_counter);
2593 }
2594
2595 return 0;
2596}
2597
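/*
 * A CPU mapped to this hctx has come (back) online: clear the inactive flag
 * set by blk_mq_hctx_notify_offline() so requests may be allocated on it again.
 */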
2598static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
2599{
2600 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2601 struct blk_mq_hw_ctx, cpuhp_online);
2602
2603 if (cpumask_test_cpu(cpu, hctx->cpumask))
2604 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2605 return 0;
2606}
2607
Jens Axboee57690f2016-08-24 15:34:35 -06002608/*
 2609 * 'cpu' is going away. Splice any existing rq_list entries from this
2610 * software queue to the hw queue dispatch list, and ensure that it
2611 * gets run.
2612 */
Thomas Gleixner9467f852016-09-22 08:05:17 -06002613static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
Jens Axboe484b4062014-05-21 14:01:15 -06002614{
Thomas Gleixner9467f852016-09-22 08:05:17 -06002615 struct blk_mq_hw_ctx *hctx;
Jens Axboe484b4062014-05-21 14:01:15 -06002616 struct blk_mq_ctx *ctx;
2617 LIST_HEAD(tmp);
Ming Leic16d6b52018-12-17 08:44:05 -07002618 enum hctx_type type;
Jens Axboe484b4062014-05-21 14:01:15 -06002619
Thomas Gleixner9467f852016-09-22 08:05:17 -06002620 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
Ming Leibf0beec2020-05-29 15:53:15 +02002621 if (!cpumask_test_cpu(cpu, hctx->cpumask))
2622 return 0;
2623
Jens Axboee57690f2016-08-24 15:34:35 -06002624 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
Ming Leic16d6b52018-12-17 08:44:05 -07002625 type = hctx->type;
Jens Axboe484b4062014-05-21 14:01:15 -06002626
2627 spin_lock(&ctx->lock);
Ming Leic16d6b52018-12-17 08:44:05 -07002628 if (!list_empty(&ctx->rq_lists[type])) {
2629 list_splice_init(&ctx->rq_lists[type], &tmp);
Jens Axboe484b4062014-05-21 14:01:15 -06002630 blk_mq_hctx_clear_pending(hctx, ctx);
2631 }
2632 spin_unlock(&ctx->lock);
2633
2634 if (list_empty(&tmp))
Thomas Gleixner9467f852016-09-22 08:05:17 -06002635 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06002636
Jens Axboee57690f2016-08-24 15:34:35 -06002637 spin_lock(&hctx->lock);
2638 list_splice_tail_init(&tmp, &hctx->dispatch);
2639 spin_unlock(&hctx->lock);
Jens Axboe484b4062014-05-21 14:01:15 -06002640
2641 blk_mq_run_hw_queue(hctx, true);
Thomas Gleixner9467f852016-09-22 08:05:17 -06002642 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06002643}
2644
Thomas Gleixner9467f852016-09-22 08:05:17 -06002645static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
Jens Axboe484b4062014-05-21 14:01:15 -06002646{
Ming Leibf0beec2020-05-29 15:53:15 +02002647 if (!(hctx->flags & BLK_MQ_F_STACKING))
2648 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2649 &hctx->cpuhp_online);
Thomas Gleixner9467f852016-09-22 08:05:17 -06002650 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2651 &hctx->cpuhp_dead);
Jens Axboe484b4062014-05-21 14:01:15 -06002652}
2653
Ming Lei364b6182021-05-11 23:22:36 +08002654/*
 2655 * Before freeing the hw queue, clear the flush request reference in
 2656 * tags->rqs[] to avoid a potential use-after-free.
2657 */
2658static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
2659 unsigned int queue_depth, struct request *flush_rq)
2660{
2661 int i;
2662 unsigned long flags;
2663
2664 /* The hw queue may not be mapped yet */
2665 if (!tags)
2666 return;
2667
2668 WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
2669
2670 for (i = 0; i < queue_depth; i++)
2671 cmpxchg(&tags->rqs[i], flush_rq, NULL);
2672
2673 /*
 2674	 * Wait until all pending iterations are done.
2675 *
2676 * Request reference is cleared and it is guaranteed to be observed
2677 * after the ->lock is released.
2678 */
2679 spin_lock_irqsave(&tags->lock, flags);
2680 spin_unlock_irqrestore(&tags->lock, flags);
2681}
2682
Ming Leic3b4afc2015-06-04 22:25:04 +08002683/* hctx->ctxs will be freed in the queue's release handler */
Ming Lei08e98fc2014-09-25 23:23:38 +08002684static void blk_mq_exit_hctx(struct request_queue *q,
2685 struct blk_mq_tag_set *set,
2686 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2687{
Ming Lei364b6182021-05-11 23:22:36 +08002688 struct request *flush_rq = hctx->fq->flush_rq;
2689
Ming Lei8ab0b7d2018-01-09 21:28:29 +08002690 if (blk_mq_hw_queue_mapped(hctx))
2691 blk_mq_tag_idle(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002692
Ming Lei364b6182021-05-11 23:22:36 +08002693 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
2694 set->queue_depth, flush_rq);
Ming Leif70ced02014-09-25 23:23:47 +08002695 if (set->ops->exit_request)
Ming Lei364b6182021-05-11 23:22:36 +08002696 set->ops->exit_request(set, flush_rq, hctx_idx);
Ming Leif70ced02014-09-25 23:23:47 +08002697
Ming Lei08e98fc2014-09-25 23:23:38 +08002698 if (set->ops->exit_hctx)
2699 set->ops->exit_hctx(hctx, hctx_idx);
2700
Thomas Gleixner9467f852016-09-22 08:05:17 -06002701 blk_mq_remove_cpuhp(hctx);
Ming Lei2f8f1332019-04-30 09:52:27 +08002702
2703 spin_lock(&q->unused_hctx_lock);
2704 list_add(&hctx->hctx_list, &q->unused_hctx_list);
2705 spin_unlock(&q->unused_hctx_lock);
Ming Lei08e98fc2014-09-25 23:23:38 +08002706}
2707
Ming Lei624dbe42014-05-27 23:35:13 +08002708static void blk_mq_exit_hw_queues(struct request_queue *q,
2709 struct blk_mq_tag_set *set, int nr_queue)
2710{
2711 struct blk_mq_hw_ctx *hctx;
2712 unsigned int i;
2713
2714 queue_for_each_hw_ctx(q, hctx, i) {
2715 if (i == nr_queue)
2716 break;
Jianchao Wang477e19d2018-10-12 18:07:25 +08002717 blk_mq_debugfs_unregister_hctx(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002718 blk_mq_exit_hctx(q, set, hctx, i);
Ming Lei624dbe42014-05-27 23:35:13 +08002719 }
Ming Lei624dbe42014-05-27 23:35:13 +08002720}
2721
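/*
 * struct blk_mq_hw_ctx keeps its srcu_struct as the trailing member, so space
 * for it is only reserved when BLK_MQ_F_BLOCKING is set; the BUILD_BUG_ON
 * below checks that srcu really is laid out at the end of the structure.
 */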
Ming Lei7c6c5b72019-04-30 09:52:26 +08002722static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2723{
2724 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2725
2726 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2727 __alignof__(struct blk_mq_hw_ctx)) !=
2728 sizeof(struct blk_mq_hw_ctx));
2729
2730 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2731 hw_ctx_size += sizeof(struct srcu_struct);
2732
2733 return hw_ctx_size;
2734}
2735
Ming Lei08e98fc2014-09-25 23:23:38 +08002736static int blk_mq_init_hctx(struct request_queue *q,
2737 struct blk_mq_tag_set *set,
2738 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2739{
Ming Lei7c6c5b72019-04-30 09:52:26 +08002740 hctx->queue_num = hctx_idx;
Ming Lei08e98fc2014-09-25 23:23:38 +08002741
Ming Leibf0beec2020-05-29 15:53:15 +02002742 if (!(hctx->flags & BLK_MQ_F_STACKING))
2743 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2744 &hctx->cpuhp_online);
Ming Lei7c6c5b72019-04-30 09:52:26 +08002745 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2746
2747 hctx->tags = set->tags[hctx_idx];
2748
2749 if (set->ops->init_hctx &&
2750 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2751 goto unregister_cpu_notifier;
2752
2753 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2754 hctx->numa_node))
2755 goto exit_hctx;
2756 return 0;
2757
2758 exit_hctx:
2759 if (set->ops->exit_hctx)
2760 set->ops->exit_hctx(hctx, hctx_idx);
2761 unregister_cpu_notifier:
2762 blk_mq_remove_cpuhp(hctx);
2763 return -1;
2764}
2765
2766static struct blk_mq_hw_ctx *
2767blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
2768 int node)
2769{
2770 struct blk_mq_hw_ctx *hctx;
2771 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
2772
2773 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2774 if (!hctx)
2775 goto fail_alloc_hctx;
2776
2777 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2778 goto free_hctx;
2779
2780 atomic_set(&hctx->nr_active, 0);
Ming Lei08e98fc2014-09-25 23:23:38 +08002781 if (node == NUMA_NO_NODE)
Ming Lei7c6c5b72019-04-30 09:52:26 +08002782 node = set->numa_node;
2783 hctx->numa_node = node;
Ming Lei08e98fc2014-09-25 23:23:38 +08002784
Jens Axboe9f993732017-04-10 09:54:54 -06002785 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
Ming Lei08e98fc2014-09-25 23:23:38 +08002786 spin_lock_init(&hctx->lock);
2787 INIT_LIST_HEAD(&hctx->dispatch);
2788 hctx->queue = q;
Ming Lei51db1c32020-08-19 23:20:19 +08002789 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
Ming Lei08e98fc2014-09-25 23:23:38 +08002790
Ming Lei2f8f1332019-04-30 09:52:27 +08002791 INIT_LIST_HEAD(&hctx->hctx_list);
2792
Ming Lei08e98fc2014-09-25 23:23:38 +08002793 /*
2794 * Allocate space for all possible cpus to avoid allocation at
2795 * runtime
2796 */
Johannes Thumshirnd904bfa2017-11-15 17:32:33 -08002797 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
Ming Lei7c6c5b72019-04-30 09:52:26 +08002798 gfp, node);
Ming Lei08e98fc2014-09-25 23:23:38 +08002799 if (!hctx->ctxs)
Ming Lei7c6c5b72019-04-30 09:52:26 +08002800 goto free_cpumask;
Ming Lei08e98fc2014-09-25 23:23:38 +08002801
Jianchao Wang5b202852018-10-12 18:07:26 +08002802 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
Ming Leic548e622021-01-22 10:33:08 +08002803 gfp, node, false, false))
Ming Lei08e98fc2014-09-25 23:23:38 +08002804 goto free_ctxs;
Ming Lei08e98fc2014-09-25 23:23:38 +08002805 hctx->nr_ctx = 0;
2806
Ming Lei5815839b2018-06-25 19:31:47 +08002807 spin_lock_init(&hctx->dispatch_wait_lock);
Jens Axboeeb619fd2017-11-09 08:32:43 -07002808 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2809 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2810
Guoqing Jiang754a1572020-03-09 22:41:37 +01002811 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
Ming Leif70ced02014-09-25 23:23:47 +08002812 if (!hctx->fq)
Ming Lei7c6c5b72019-04-30 09:52:26 +08002813 goto free_bitmap;
Ming Leif70ced02014-09-25 23:23:47 +08002814
Bart Van Assche6a83e742016-11-02 10:09:51 -06002815 if (hctx->flags & BLK_MQ_F_BLOCKING)
Tejun Heo05707b62018-01-09 08:29:53 -08002816 init_srcu_struct(hctx->srcu);
Ming Lei7c6c5b72019-04-30 09:52:26 +08002817 blk_mq_hctx_kobj_init(hctx);
Bart Van Assche6a83e742016-11-02 10:09:51 -06002818
Ming Lei7c6c5b72019-04-30 09:52:26 +08002819 return hctx;
Ming Lei08e98fc2014-09-25 23:23:38 +08002820
2821 free_bitmap:
Omar Sandoval88459642016-09-17 08:38:44 -06002822 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08002823 free_ctxs:
2824 kfree(hctx->ctxs);
Ming Lei7c6c5b72019-04-30 09:52:26 +08002825 free_cpumask:
2826 free_cpumask_var(hctx->cpumask);
2827 free_hctx:
2828 kfree(hctx);
2829 fail_alloc_hctx:
2830 return NULL;
Ming Lei08e98fc2014-09-25 23:23:38 +08002831}
2832
Jens Axboe320ae512013-10-24 09:20:05 +01002833static void blk_mq_init_cpu_queues(struct request_queue *q,
2834 unsigned int nr_hw_queues)
2835{
Jens Axboeb3c661b2018-10-30 10:36:06 -06002836 struct blk_mq_tag_set *set = q->tag_set;
2837 unsigned int i, j;
Jens Axboe320ae512013-10-24 09:20:05 +01002838
2839 for_each_possible_cpu(i) {
2840 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2841 struct blk_mq_hw_ctx *hctx;
Ming Leic16d6b52018-12-17 08:44:05 -07002842 int k;
Jens Axboe320ae512013-10-24 09:20:05 +01002843
Jens Axboe320ae512013-10-24 09:20:05 +01002844 __ctx->cpu = i;
2845 spin_lock_init(&__ctx->lock);
Ming Leic16d6b52018-12-17 08:44:05 -07002846 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2847 INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2848
Jens Axboe320ae512013-10-24 09:20:05 +01002849 __ctx->queue = q;
2850
Jens Axboe320ae512013-10-24 09:20:05 +01002851 /*
 2852	 * Set the local node, IFF we have more than one hw queue. If
 2853	 * not, we remain on the home node of the device.
2854 */
Jens Axboeb3c661b2018-10-30 10:36:06 -06002855 for (j = 0; j < set->nr_maps; j++) {
2856 hctx = blk_mq_map_queue_type(q, j, i);
2857 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
Xianting Tian576e85c2020-10-19 16:20:47 +08002858 hctx->numa_node = cpu_to_node(i);
Jens Axboeb3c661b2018-10-30 10:36:06 -06002859 }
Jens Axboe320ae512013-10-24 09:20:05 +01002860 }
2861}
2862
John Garry63064be2021-10-05 18:23:35 +08002863struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
2864 unsigned int hctx_idx,
2865 unsigned int depth)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002866{
John Garry63064be2021-10-05 18:23:35 +08002867 struct blk_mq_tags *tags;
2868 int ret;
Jens Axboecc71a6f2017-01-11 14:29:56 -07002869
John Garrye155b0c2021-10-05 18:23:37 +08002870 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
John Garry63064be2021-10-05 18:23:35 +08002871 if (!tags)
2872 return NULL;
Jens Axboecc71a6f2017-01-11 14:29:56 -07002873
John Garry63064be2021-10-05 18:23:35 +08002874 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
2875 if (ret) {
John Garrye155b0c2021-10-05 18:23:37 +08002876 blk_mq_free_rq_map(tags);
John Garry63064be2021-10-05 18:23:35 +08002877 return NULL;
2878 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002879
John Garry63064be2021-10-05 18:23:35 +08002880 return tags;
2881}
2882
2883static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
2884 int hctx_idx)
2885{
John Garry079a2e32021-10-05 18:23:39 +08002886 if (blk_mq_is_shared_tags(set->flags)) {
2887 set->tags[hctx_idx] = set->shared_tags;
John Garrye155b0c2021-10-05 18:23:37 +08002888
2889 return true;
2890 }
2891
John Garry63064be2021-10-05 18:23:35 +08002892 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
2893 set->queue_depth);
2894
2895 return set->tags[hctx_idx];
Jens Axboecc71a6f2017-01-11 14:29:56 -07002896}
2897
John Garry645db342021-10-05 18:23:36 +08002898void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
2899 struct blk_mq_tags *tags,
2900 unsigned int hctx_idx)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002901{
John Garry645db342021-10-05 18:23:36 +08002902 if (tags) {
2903 blk_mq_free_rqs(set, tags, hctx_idx);
John Garrye155b0c2021-10-05 18:23:37 +08002904 blk_mq_free_rq_map(tags);
Jens Axboebd166ef2017-01-17 06:03:22 -07002905 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002906}
2907
John Garrye155b0c2021-10-05 18:23:37 +08002908static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
2909 unsigned int hctx_idx)
2910{
John Garry079a2e32021-10-05 18:23:39 +08002911 if (!blk_mq_is_shared_tags(set->flags))
John Garrye155b0c2021-10-05 18:23:37 +08002912 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
2913
2914 set->tags[hctx_idx] = NULL;
2915}
2916
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002917static void blk_mq_map_swqueue(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01002918{
Jens Axboeb3c661b2018-10-30 10:36:06 -06002919 unsigned int i, j, hctx_idx;
Jens Axboe320ae512013-10-24 09:20:05 +01002920 struct blk_mq_hw_ctx *hctx;
2921 struct blk_mq_ctx *ctx;
Ming Lei2a34c082015-04-21 10:00:20 +08002922 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002923
2924 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboee4043dc2014-04-09 10:18:23 -06002925 cpumask_clear(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002926 hctx->nr_ctx = 0;
huhaid416c922018-05-18 08:32:30 -06002927 hctx->dispatch_from = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002928 }
2929
2930 /*
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002931 * Map software to hardware queues.
Ming Lei4412efe2018-04-25 04:01:44 +08002932 *
 2933	 * If the cpu isn't present, the cpu is mapped to the first hctx.
Jens Axboe320ae512013-10-24 09:20:05 +01002934 */
Christoph Hellwig20e4d8132018-01-12 10:53:06 +08002935 for_each_possible_cpu(i) {
Ming Lei4412efe2018-04-25 04:01:44 +08002936
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002937 ctx = per_cpu_ptr(q->queue_ctx, i);
Jens Axboeb3c661b2018-10-30 10:36:06 -06002938 for (j = 0; j < set->nr_maps; j++) {
Jianchao Wangbb94aea2019-01-24 18:25:33 +08002939 if (!set->map[j].nr_queues) {
2940 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2941 HCTX_TYPE_DEFAULT, i);
Ming Leie5edd5f2018-12-18 01:28:56 +08002942 continue;
Jianchao Wangbb94aea2019-01-24 18:25:33 +08002943 }
Ming Leifd689872020-05-07 21:04:08 +08002944 hctx_idx = set->map[j].mq_map[i];
 2945	/* An unmapped hw queue can be remapped after the CPU topology changes */
2946 if (!set->tags[hctx_idx] &&
John Garry63064be2021-10-05 18:23:35 +08002947 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
Ming Leifd689872020-05-07 21:04:08 +08002948 /*
 2949				 * If tags initialization fails for some hctx,
 2950				 * that hctx won't be brought online. In this
 2951				 * case, remap the current ctx to hctx[0] which
 2952				 * is guaranteed to always have tags allocated.
2953 */
2954 set->map[j].mq_map[i] = 0;
2955 }
Ming Leie5edd5f2018-12-18 01:28:56 +08002956
Jens Axboeb3c661b2018-10-30 10:36:06 -06002957 hctx = blk_mq_map_queue_type(q, j, i);
Jianchao Wang8ccdf4a2019-01-24 18:25:32 +08002958 ctx->hctxs[j] = hctx;
Jens Axboeb3c661b2018-10-30 10:36:06 -06002959 /*
2960 * If the CPU is already set in the mask, then we've
2961 * mapped this one already. This can happen if
2962 * devices share queues across queue maps.
2963 */
2964 if (cpumask_test_cpu(i, hctx->cpumask))
2965 continue;
2966
2967 cpumask_set_cpu(i, hctx->cpumask);
2968 hctx->type = j;
2969 ctx->index_hw[hctx->type] = hctx->nr_ctx;
2970 hctx->ctxs[hctx->nr_ctx++] = ctx;
2971
2972 /*
2973 * If the nr_ctx type overflows, we have exceeded the
 2974	 * number of sw queues we can support.
2975 */
2976 BUG_ON(!hctx->nr_ctx);
2977 }
Jianchao Wangbb94aea2019-01-24 18:25:33 +08002978
2979 for (; j < HCTX_MAX_TYPES; j++)
2980 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2981 HCTX_TYPE_DEFAULT, i);
Jens Axboe320ae512013-10-24 09:20:05 +01002982 }
Jens Axboe506e9312014-05-07 10:26:44 -06002983
2984 queue_for_each_hw_ctx(q, hctx, i) {
Ming Lei4412efe2018-04-25 04:01:44 +08002985 /*
2986 * If no software queues are mapped to this hardware queue,
2987 * disable it and free the request entries.
2988 */
2989 if (!hctx->nr_ctx) {
2990 /* Never unmap queue 0. We need it as a
 2991			 * fallback in case a new remap fails to
 2992			 * allocate.
2993 */
John Garrye155b0c2021-10-05 18:23:37 +08002994 if (i)
2995 __blk_mq_free_map_and_rqs(set, i);
Ming Lei4412efe2018-04-25 04:01:44 +08002996
2997 hctx->tags = NULL;
2998 continue;
2999 }
Jens Axboe484b4062014-05-21 14:01:15 -06003000
Ming Lei2a34c082015-04-21 10:00:20 +08003001 hctx->tags = set->tags[i];
3002 WARN_ON(!hctx->tags);
3003
Jens Axboe484b4062014-05-21 14:01:15 -06003004 /*
Chong Yuan889fa312015-04-15 11:39:29 -06003005 * Set the map size to the number of mapped software queues.
3006 * This is more accurate and more efficient than looping
3007 * over all possibly mapped software queues.
3008 */
Omar Sandoval88459642016-09-17 08:38:44 -06003009 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
Chong Yuan889fa312015-04-15 11:39:29 -06003010
3011 /*
Jens Axboe484b4062014-05-21 14:01:15 -06003012 * Initialize batch roundrobin counts
3013 */
Ming Leif82ddf12018-04-08 17:48:10 +08003014 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
Jens Axboe506e9312014-05-07 10:26:44 -06003015 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3016 }
Jens Axboe320ae512013-10-24 09:20:05 +01003017}
3018
Jens Axboe8e8320c2017-06-20 17:56:13 -06003019/*
3020 * Caller needs to ensure that we're either frozen/quiesced, or that
3021 * the queue isn't live yet.
3022 */
Jeff Moyer2404e602015-11-03 10:40:06 -05003023static void queue_set_hctx_shared(struct request_queue *q, bool shared)
Jens Axboe0d2602c2014-05-13 15:10:52 -06003024{
3025 struct blk_mq_hw_ctx *hctx;
Jens Axboe0d2602c2014-05-13 15:10:52 -06003026 int i;
3027
Jeff Moyer2404e602015-11-03 10:40:06 -05003028 queue_for_each_hw_ctx(q, hctx, i) {
Yu Kuai454bb672021-07-31 14:21:30 +08003029 if (shared) {
Ming Lei51db1c32020-08-19 23:20:19 +08003030 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
Yu Kuai454bb672021-07-31 14:21:30 +08003031 } else {
3032 blk_mq_tag_idle(hctx);
Ming Lei51db1c32020-08-19 23:20:19 +08003033 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
Yu Kuai454bb672021-07-31 14:21:30 +08003034 }
Jeff Moyer2404e602015-11-03 10:40:06 -05003035 }
3036}
3037
Hannes Reinecke655ac302020-08-19 23:20:20 +08003038static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3039 bool shared)
Jeff Moyer2404e602015-11-03 10:40:06 -05003040{
3041 struct request_queue *q;
Jens Axboe0d2602c2014-05-13 15:10:52 -06003042
Bart Van Assche705cda92017-04-07 11:16:49 -07003043 lockdep_assert_held(&set->tag_list_lock);
3044
Jens Axboe0d2602c2014-05-13 15:10:52 -06003045 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3046 blk_mq_freeze_queue(q);
Jeff Moyer2404e602015-11-03 10:40:06 -05003047 queue_set_hctx_shared(q, shared);
Jens Axboe0d2602c2014-05-13 15:10:52 -06003048 blk_mq_unfreeze_queue(q);
3049 }
3050}
3051
3052static void blk_mq_del_queue_tag_set(struct request_queue *q)
3053{
3054 struct blk_mq_tag_set *set = q->tag_set;
3055
Jens Axboe0d2602c2014-05-13 15:10:52 -06003056 mutex_lock(&set->tag_list_lock);
Daniel Wagner08c875c2020-07-28 15:29:51 +02003057 list_del(&q->tag_set_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05003058 if (list_is_singular(&set->tag_list)) {
3059 /* just transitioned to unshared */
Ming Lei51db1c32020-08-19 23:20:19 +08003060 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
Jeff Moyer2404e602015-11-03 10:40:06 -05003061 /* update existing queue */
Hannes Reinecke655ac302020-08-19 23:20:20 +08003062 blk_mq_update_tag_set_shared(set, false);
Jeff Moyer2404e602015-11-03 10:40:06 -05003063 }
Jens Axboe0d2602c2014-05-13 15:10:52 -06003064 mutex_unlock(&set->tag_list_lock);
Roman Pena347c7a2018-06-10 22:38:24 +02003065 INIT_LIST_HEAD(&q->tag_set_list);
Jens Axboe0d2602c2014-05-13 15:10:52 -06003066}
3067
3068static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
3069 struct request_queue *q)
3070{
Jens Axboe0d2602c2014-05-13 15:10:52 -06003071 mutex_lock(&set->tag_list_lock);
Jeff Moyer2404e602015-11-03 10:40:06 -05003072
Jens Axboeff821d22017-11-10 22:05:12 -07003073 /*
3074 * Check to see if we're transitioning to shared (from 1 to 2 queues).
3075 */
3076 if (!list_empty(&set->tag_list) &&
Ming Lei51db1c32020-08-19 23:20:19 +08003077 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
3078 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
Jeff Moyer2404e602015-11-03 10:40:06 -05003079 /* update existing queue */
Hannes Reinecke655ac302020-08-19 23:20:20 +08003080 blk_mq_update_tag_set_shared(set, true);
Jeff Moyer2404e602015-11-03 10:40:06 -05003081 }
Ming Lei51db1c32020-08-19 23:20:19 +08003082 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
Jeff Moyer2404e602015-11-03 10:40:06 -05003083 queue_set_hctx_shared(q, true);
Daniel Wagner08c875c2020-07-28 15:29:51 +02003084 list_add_tail(&q->tag_set_list, &set->tag_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05003085
Jens Axboe0d2602c2014-05-13 15:10:52 -06003086 mutex_unlock(&set->tag_list_lock);
3087}
3088
Ming Lei1db49092018-11-20 09:44:35 +08003089/* All allocations will be freed in release handler of q->mq_kobj */
3090static int blk_mq_alloc_ctxs(struct request_queue *q)
3091{
3092 struct blk_mq_ctxs *ctxs;
3093 int cpu;
3094
3095 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3096 if (!ctxs)
3097 return -ENOMEM;
3098
3099 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3100 if (!ctxs->queue_ctx)
3101 goto fail;
3102
3103 for_each_possible_cpu(cpu) {
3104 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3105 ctx->ctxs = ctxs;
3106 }
3107
3108 q->mq_kobj = &ctxs->kobj;
3109 q->queue_ctx = ctxs->queue_ctx;
3110
3111 return 0;
3112 fail:
3113 kfree(ctxs);
3114 return -ENOMEM;
3115}
3116
Ming Leie09aae72015-01-29 20:17:27 +08003117/*
 3118 * This is the actual release handler for mq, but we do it from the
 3119 * request queue's release handler to avoid use-after-free and
 3120 * headaches: q->mq_kobj shouldn't have been introduced, but we
 3121 * can't group the ctx/kctx kobjects without it.
3122 */
3123void blk_mq_release(struct request_queue *q)
3124{
Ming Lei2f8f1332019-04-30 09:52:27 +08003125 struct blk_mq_hw_ctx *hctx, *next;
3126 int i;
Ming Leie09aae72015-01-29 20:17:27 +08003127
Ming Lei2f8f1332019-04-30 09:52:27 +08003128 queue_for_each_hw_ctx(q, hctx, i)
3129 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3130
3131 /* all hctx are in .unused_hctx_list now */
3132 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3133 list_del_init(&hctx->hctx_list);
Ming Lei6c8b2322017-02-22 18:14:01 +08003134 kobject_put(&hctx->kobj);
Ming Leic3b4afc2015-06-04 22:25:04 +08003135 }
Ming Leie09aae72015-01-29 20:17:27 +08003136
3137 kfree(q->queue_hw_ctx);
3138
Ming Lei7ea5fe32017-02-22 18:14:00 +08003139 /*
 3140	 * Release .mq_kobj and the sw queues' kobjects now because
 3141	 * both share their lifetime with the request queue.
3142 */
3143 blk_mq_sysfs_deinit(q);
Ming Leie09aae72015-01-29 20:17:27 +08003144}
3145
Christoph Hellwig5ec780a2021-06-24 10:10:12 +02003146static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
Christoph Hellwig2f227bb2020-03-27 09:30:08 +01003147 void *queuedata)
Jens Axboe320ae512013-10-24 09:20:05 +01003148{
Christoph Hellwig26a97502021-06-02 09:53:17 +03003149 struct request_queue *q;
3150 int ret;
Mike Snitzerb62c21b2015-03-12 23:56:02 -04003151
Christoph Hellwig26a97502021-06-02 09:53:17 +03003152 q = blk_alloc_queue(set->numa_node);
3153 if (!q)
Mike Snitzerb62c21b2015-03-12 23:56:02 -04003154 return ERR_PTR(-ENOMEM);
Christoph Hellwig26a97502021-06-02 09:53:17 +03003155 q->queuedata = queuedata;
3156 ret = blk_mq_init_allocated_queue(set, q);
3157 if (ret) {
3158 blk_cleanup_queue(q);
3159 return ERR_PTR(ret);
3160 }
Mike Snitzerb62c21b2015-03-12 23:56:02 -04003161 return q;
3162}
Christoph Hellwig2f227bb2020-03-27 09:30:08 +01003163
3164struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
3165{
3166 return blk_mq_init_queue_data(set, NULL);
3167}
Mike Snitzerb62c21b2015-03-12 23:56:02 -04003168EXPORT_SYMBOL(blk_mq_init_queue);
3169
Christoph Hellwig4dcc4872021-08-16 15:19:05 +02003170struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
3171 struct lock_class_key *lkclass)
Jens Axboe9316a9e2018-10-15 08:40:37 -06003172{
3173 struct request_queue *q;
Christoph Hellwigb461dfc2021-06-02 09:53:18 +03003174 struct gendisk *disk;
Jens Axboe9316a9e2018-10-15 08:40:37 -06003175
Christoph Hellwigb461dfc2021-06-02 09:53:18 +03003176 q = blk_mq_init_queue_data(set, queuedata);
3177 if (IS_ERR(q))
3178 return ERR_CAST(q);
Jens Axboe9316a9e2018-10-15 08:40:37 -06003179
Christoph Hellwig4a1fa412021-08-16 15:19:08 +02003180 disk = __alloc_disk_node(q, set->numa_node, lkclass);
Christoph Hellwigb461dfc2021-06-02 09:53:18 +03003181 if (!disk) {
3182 blk_cleanup_queue(q);
3183 return ERR_PTR(-ENOMEM);
Jens Axboe9316a9e2018-10-15 08:40:37 -06003184 }
Christoph Hellwigb461dfc2021-06-02 09:53:18 +03003185 return disk;
Jens Axboe9316a9e2018-10-15 08:40:37 -06003186}
Christoph Hellwigb461dfc2021-06-02 09:53:18 +03003187EXPORT_SYMBOL(__blk_mq_alloc_disk);
Jens Axboe9316a9e2018-10-15 08:40:37 -06003188
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003189static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
3190 struct blk_mq_tag_set *set, struct request_queue *q,
3191 int hctx_idx, int node)
3192{
Ming Lei2f8f1332019-04-30 09:52:27 +08003193 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003194
Ming Lei2f8f1332019-04-30 09:52:27 +08003195 /* reuse dead hctx first */
3196 spin_lock(&q->unused_hctx_lock);
3197 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
3198 if (tmp->numa_node == node) {
3199 hctx = tmp;
3200 break;
3201 }
3202 }
3203 if (hctx)
3204 list_del_init(&hctx->hctx_list);
3205 spin_unlock(&q->unused_hctx_lock);
3206
3207 if (!hctx)
3208 hctx = blk_mq_alloc_hctx(q, set, node);
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003209 if (!hctx)
Ming Lei7c6c5b72019-04-30 09:52:26 +08003210 goto fail;
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003211
Ming Lei7c6c5b72019-04-30 09:52:26 +08003212 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
3213 goto free_hctx;
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003214
3215 return hctx;
Ming Lei7c6c5b72019-04-30 09:52:26 +08003216
3217 free_hctx:
3218 kobject_put(&hctx->kobj);
3219 fail:
3220 return NULL;
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003221}
3222
Keith Busch868f2f02015-12-17 17:08:14 -07003223static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
3224 struct request_queue *q)
Mike Snitzerb62c21b2015-03-12 23:56:02 -04003225{
Jianchao Wange01ad462018-10-12 18:07:28 +08003226 int i, j, end;
Keith Busch868f2f02015-12-17 17:08:14 -07003227 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
Jens Axboe320ae512013-10-24 09:20:05 +01003228
Bart Van Asscheac0d6b92019-10-25 09:50:09 -07003229 if (q->nr_hw_queues < set->nr_hw_queues) {
3230 struct blk_mq_hw_ctx **new_hctxs;
3231
3232 new_hctxs = kcalloc_node(set->nr_hw_queues,
3233 sizeof(*new_hctxs), GFP_KERNEL,
3234 set->numa_node);
3235 if (!new_hctxs)
3236 return;
3237 if (hctxs)
3238 memcpy(new_hctxs, hctxs, q->nr_hw_queues *
3239 sizeof(*hctxs));
3240 q->queue_hw_ctx = new_hctxs;
Bart Van Asscheac0d6b92019-10-25 09:50:09 -07003241 kfree(hctxs);
3242 hctxs = new_hctxs;
3243 }
3244
Ming Leifb350e02018-01-06 16:27:40 +08003245 /* protect against switching io scheduler */
3246 mutex_lock(&q->sysfs_lock);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003247 for (i = 0; i < set->nr_hw_queues; i++) {
Keith Busch868f2f02015-12-17 17:08:14 -07003248 int node;
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003249 struct blk_mq_hw_ctx *hctx;
Keith Busch868f2f02015-12-17 17:08:14 -07003250
Dongli Zhang7d76f852019-02-27 21:35:01 +08003251 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003252 /*
3253 * If the hw queue has been mapped to another numa node,
 3254		 * we need to realloc the hctx. If allocation fails, fall back
 3255		 * to using the previous one.
3256 */
3257 if (hctxs[i] && (hctxs[i]->numa_node == node))
3258 continue;
Jens Axboe320ae512013-10-24 09:20:05 +01003259
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003260 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
3261 if (hctx) {
Ming Lei2f8f1332019-04-30 09:52:27 +08003262 if (hctxs[i])
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003263 blk_mq_exit_hctx(q, set, hctxs[i], i);
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003264 hctxs[i] = hctx;
3265 } else {
3266 if (hctxs[i])
 3267				pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
 3268					node, hctxs[i]->numa_node);
3270 else
3271 break;
Keith Busch868f2f02015-12-17 17:08:14 -07003272 }
Jens Axboe320ae512013-10-24 09:20:05 +01003273 }
Jianchao Wange01ad462018-10-12 18:07:28 +08003274 /*
3275 * Increasing nr_hw_queues fails. Free the newly allocated
3276 * hctxs and keep the previous q->nr_hw_queues.
3277 */
3278 if (i != set->nr_hw_queues) {
3279 j = q->nr_hw_queues;
3280 end = i;
3281 } else {
3282 j = i;
3283 end = q->nr_hw_queues;
3284 q->nr_hw_queues = set->nr_hw_queues;
3285 }
Jianchao Wang34d11ff2018-10-12 18:07:27 +08003286
Jianchao Wange01ad462018-10-12 18:07:28 +08003287 for (; j < end; j++) {
Keith Busch868f2f02015-12-17 17:08:14 -07003288 struct blk_mq_hw_ctx *hctx = hctxs[j];
3289
3290 if (hctx) {
John Garrye155b0c2021-10-05 18:23:37 +08003291 __blk_mq_free_map_and_rqs(set, j);
Keith Busch868f2f02015-12-17 17:08:14 -07003292 blk_mq_exit_hctx(q, set, hctx, j);
Keith Busch868f2f02015-12-17 17:08:14 -07003293 hctxs[j] = NULL;
Keith Busch868f2f02015-12-17 17:08:14 -07003294 }
3295 }
Ming Leifb350e02018-01-06 16:27:40 +08003296 mutex_unlock(&q->sysfs_lock);
Keith Busch868f2f02015-12-17 17:08:14 -07003297}
3298
Christoph Hellwig26a97502021-06-02 09:53:17 +03003299int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
3300 struct request_queue *q)
Keith Busch868f2f02015-12-17 17:08:14 -07003301{
Ming Lei66841672016-02-12 15:27:00 +08003302 /* mark the queue as mq asap */
3303 q->mq_ops = set->ops;
3304
Omar Sandoval34dbad52017-03-21 08:56:08 -07003305 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
Stephen Bates720b8cc2017-04-07 06:24:03 -06003306 blk_mq_poll_stats_bkt,
3307 BLK_MQ_POLL_STATS_BKTS, q);
Omar Sandoval34dbad52017-03-21 08:56:08 -07003308 if (!q->poll_cb)
3309 goto err_exit;
3310
Ming Lei1db49092018-11-20 09:44:35 +08003311 if (blk_mq_alloc_ctxs(q))
Jes Sorensen41de54c2019-04-19 16:35:44 -04003312 goto err_poll;
Keith Busch868f2f02015-12-17 17:08:14 -07003313
Ming Lei737f98c2017-02-22 18:13:59 +08003314 /* init q->mq_kobj and sw queues' kobjects */
3315 blk_mq_sysfs_init(q);
3316
Ming Lei2f8f1332019-04-30 09:52:27 +08003317 INIT_LIST_HEAD(&q->unused_hctx_list);
3318 spin_lock_init(&q->unused_hctx_lock);
3319
Keith Busch868f2f02015-12-17 17:08:14 -07003320 blk_mq_realloc_hw_ctxs(set, q);
3321 if (!q->nr_hw_queues)
3322 goto err_hctxs;
Jens Axboe320ae512013-10-24 09:20:05 +01003323
Christoph Hellwig287922e2015-10-30 20:57:30 +08003324 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
Ming Leie56f6982015-07-16 19:53:22 +08003325 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
Jens Axboe320ae512013-10-24 09:20:05 +01003326
Jens Axboea8908932018-10-16 14:23:06 -06003327 q->tag_set = set;
Jens Axboe320ae512013-10-24 09:20:05 +01003328
Jens Axboe94eddfb2013-11-19 09:25:07 -07003329 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
Ming Leicd191812018-12-18 12:15:29 +08003330 if (set->nr_maps > HCTX_TYPE_POLL &&
3331 set->map[HCTX_TYPE_POLL].nr_queues)
Christoph Hellwig6544d222018-12-02 17:46:28 +01003332 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
Jens Axboe320ae512013-10-24 09:20:05 +01003333
Mike Snitzer28494502016-09-14 13:28:30 -04003334 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
Christoph Hellwig6fca6a62014-05-28 08:08:02 -06003335 INIT_LIST_HEAD(&q->requeue_list);
3336 spin_lock_init(&q->requeue_lock);
3337
Jens Axboeeba71762014-05-20 15:17:27 -06003338 q->nr_requests = set->queue_depth;
3339
Jens Axboe64f1c212016-11-14 13:03:03 -07003340 /*
3341 * Default to classic polling
3342 */
Yufen Yu29ece8b2019-03-18 22:44:41 +08003343 q->poll_nsec = BLK_MQ_POLL_CLASSIC;
Jens Axboe64f1c212016-11-14 13:03:03 -07003344
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003345 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
Jens Axboe0d2602c2014-05-13 15:10:52 -06003346 blk_mq_add_queue_tag_set(set, q);
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02003347 blk_mq_map_swqueue(q);
Christoph Hellwig26a97502021-06-02 09:53:17 +03003348 return 0;
Christoph Hellwig18741982014-02-10 09:29:00 -07003349
Jens Axboe320ae512013-10-24 09:20:05 +01003350err_hctxs:
Keith Busch868f2f02015-12-17 17:08:14 -07003351 kfree(q->queue_hw_ctx);
zhengbin73d9c8d2019-07-23 22:10:42 +08003352 q->nr_hw_queues = 0;
Ming Lei1db49092018-11-20 09:44:35 +08003353 blk_mq_sysfs_deinit(q);
Jes Sorensen41de54c2019-04-19 16:35:44 -04003354err_poll:
3355 blk_stat_free_callback(q->poll_cb);
3356 q->poll_cb = NULL;
Ming Linc7de5722016-05-25 23:23:27 -07003357err_exit:
3358 q->mq_ops = NULL;
Christoph Hellwig26a97502021-06-02 09:53:17 +03003359 return -ENOMEM;
Jens Axboe320ae512013-10-24 09:20:05 +01003360}
Mike Snitzerb62c21b2015-03-12 23:56:02 -04003361EXPORT_SYMBOL(blk_mq_init_allocated_queue);
Jens Axboe320ae512013-10-24 09:20:05 +01003362
Ming Leic7e2d942019-04-30 09:52:25 +08003363/* tags can _not_ be used after returning from blk_mq_exit_queue */
3364void blk_mq_exit_queue(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01003365{
Bart Van Assche630ef622021-05-13 10:15:29 -07003366 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01003367
Bart Van Assche630ef622021-05-13 10:15:29 -07003368 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
Ming Lei624dbe42014-05-27 23:35:13 +08003369 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
Bart Van Assche630ef622021-05-13 10:15:29 -07003370 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
3371 blk_mq_del_queue_tag_set(q);
Jens Axboe320ae512013-10-24 09:20:05 +01003372}
Jens Axboe320ae512013-10-24 09:20:05 +01003373
Jens Axboea5164402014-09-10 09:02:03 -06003374static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
3375{
3376 int i;
3377
John Garry079a2e32021-10-05 18:23:39 +08003378 if (blk_mq_is_shared_tags(set->flags)) {
3379 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
John Garrye155b0c2021-10-05 18:23:37 +08003380 BLK_MQ_NO_HCTX_IDX,
3381 set->queue_depth);
John Garry079a2e32021-10-05 18:23:39 +08003382 if (!set->shared_tags)
John Garrye155b0c2021-10-05 18:23:37 +08003383 return -ENOMEM;
3384 }
3385
Xianting Tian8229cca2020-09-26 10:39:47 +08003386 for (i = 0; i < set->nr_hw_queues; i++) {
John Garry63064be2021-10-05 18:23:35 +08003387 if (!__blk_mq_alloc_map_and_rqs(set, i))
Jens Axboea5164402014-09-10 09:02:03 -06003388 goto out_unwind;
Xianting Tian8229cca2020-09-26 10:39:47 +08003389 cond_resched();
3390 }
Jens Axboea5164402014-09-10 09:02:03 -06003391
3392 return 0;
3393
3394out_unwind:
John Garrye155b0c2021-10-05 18:23:37 +08003395 while (--i >= 0)
3396 __blk_mq_free_map_and_rqs(set, i);
3397
John Garry079a2e32021-10-05 18:23:39 +08003398 if (blk_mq_is_shared_tags(set->flags)) {
3399 blk_mq_free_map_and_rqs(set, set->shared_tags,
John Garrye155b0c2021-10-05 18:23:37 +08003400 BLK_MQ_NO_HCTX_IDX);
John Garry645db342021-10-05 18:23:36 +08003401 }
Jens Axboea5164402014-09-10 09:02:03 -06003402
Jens Axboea5164402014-09-10 09:02:03 -06003403 return -ENOMEM;
3404}
3405
3406/*
3407 * Allocate the request maps associated with this tag_set. Note that this
3408 * may reduce the depth asked for, if memory is tight. set->queue_depth
3409 * will be updated to reflect the allocated depth.
3410 */
John Garry63064be2021-10-05 18:23:35 +08003411static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
Jens Axboea5164402014-09-10 09:02:03 -06003412{
3413 unsigned int depth;
3414 int err;
3415
3416 depth = set->queue_depth;
3417 do {
3418 err = __blk_mq_alloc_rq_maps(set);
3419 if (!err)
3420 break;
3421
3422 set->queue_depth >>= 1;
3423 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
3424 err = -ENOMEM;
3425 break;
3426 }
3427 } while (set->queue_depth);
3428
3429 if (!set->queue_depth || err) {
3430 pr_err("blk-mq: failed to allocate request map\n");
3431 return -ENOMEM;
3432 }
3433
3434 if (depth != set->queue_depth)
3435 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
3436 depth, set->queue_depth);
3437
3438 return 0;
3439}
3440
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06003441static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
3442{
Bart Van Assche6e66b492020-03-09 21:26:17 -07003443 /*
3444 * blk_mq_map_queues() and multiple .map_queues() implementations
3445 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
3446 * number of hardware queues.
3447 */
3448 if (set->nr_maps == 1)
3449 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
3450
Ming Lei59388702018-12-07 11:03:53 +08003451 if (set->ops->map_queues && !is_kdump_kernel()) {
Jens Axboeb3c661b2018-10-30 10:36:06 -06003452 int i;
3453
Ming Lei7d4901a2018-01-06 16:27:39 +08003454 /*
3455 * transport .map_queues is usually done in the following
3456 * way:
3457 *
3458 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
3459 * mask = get_cpu_mask(queue)
3460 * for_each_cpu(cpu, mask)
Jens Axboeb3c661b2018-10-30 10:36:06 -06003461 * set->map[x].mq_map[cpu] = queue;
Ming Lei7d4901a2018-01-06 16:27:39 +08003462 * }
3463 *
 3464		 * When we need to remap, the table has to be cleared to
 3465		 * kill stale mappings, since one CPU may not be mapped
 3466		 * to any hw queue.
3467 */
Jens Axboeb3c661b2018-10-30 10:36:06 -06003468 for (i = 0; i < set->nr_maps; i++)
3469 blk_mq_clear_mq_map(&set->map[i]);
Ming Lei7d4901a2018-01-06 16:27:39 +08003470
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06003471 return set->ops->map_queues(set);
Jens Axboeb3c661b2018-10-30 10:36:06 -06003472 } else {
3473 BUG_ON(set->nr_maps > 1);
Dongli Zhang7d76f852019-02-27 21:35:01 +08003474 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
Jens Axboeb3c661b2018-10-30 10:36:06 -06003475 }
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06003476}
3477
Bart Van Asschef7e76db2019-10-25 09:50:10 -07003478static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
3479 int cur_nr_hw_queues, int new_nr_hw_queues)
3480{
3481 struct blk_mq_tags **new_tags;
3482
3483 if (cur_nr_hw_queues >= new_nr_hw_queues)
3484 return 0;
3485
3486 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
3487 GFP_KERNEL, set->numa_node);
3488 if (!new_tags)
3489 return -ENOMEM;
3490
3491 if (set->tags)
3492 memcpy(new_tags, set->tags, cur_nr_hw_queues *
3493 sizeof(*set->tags));
3494 kfree(set->tags);
3495 set->tags = new_tags;
3496 set->nr_hw_queues = new_nr_hw_queues;
3497
3498 return 0;
3499}
3500
Minwoo Im91cdf262020-12-05 00:20:53 +09003501static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
3502 int new_nr_hw_queues)
3503{
3504 return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
3505}
3506
Jens Axboea4391c62014-06-05 15:21:56 -06003507/*
3508 * Alloc a tag set to be associated with one or more request queues.
3509 * May fail with EINVAL for various error conditions. May adjust the
Minwoo Imc018c842018-06-30 22:12:41 +09003510 * requested depth down, if it's too large. In that case, the set
Jens Axboea4391c62014-06-05 15:21:56 -06003511 * value will be stored in set->queue_depth.
3512 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003513int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3514{
Jens Axboeb3c661b2018-10-30 10:36:06 -06003515 int i, ret;
Christoph Hellwigda695ba2016-09-14 16:18:55 +02003516
Bart Van Assche205fb5f2014-10-30 14:45:11 +01003517 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3518
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003519 if (!set->nr_hw_queues)
3520 return -EINVAL;
Jens Axboea4391c62014-06-05 15:21:56 -06003521 if (!set->queue_depth)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003522 return -EINVAL;
3523 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3524 return -EINVAL;
3525
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02003526 if (!set->ops->queue_rq)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003527 return -EINVAL;
3528
Ming Leide148292017-10-14 17:22:29 +08003529 if (!set->ops->get_budget ^ !set->ops->put_budget)
3530 return -EINVAL;
3531
Jens Axboea4391c62014-06-05 15:21:56 -06003532 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3533 pr_info("blk-mq: reduced tag depth to %u\n",
3534 BLK_MQ_MAX_DEPTH);
3535 set->queue_depth = BLK_MQ_MAX_DEPTH;
3536 }
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003537
Jens Axboeb3c661b2018-10-30 10:36:06 -06003538 if (!set->nr_maps)
3539 set->nr_maps = 1;
3540 else if (set->nr_maps > HCTX_MAX_TYPES)
3541 return -EINVAL;
3542
Shaohua Li6637fad2014-11-30 16:00:58 -08003543 /*
3544 * If a crashdump is active, then we are potentially in a very
 3545	 * memory-constrained environment. Limit us to 1 queue and
3546 * 64 tags to prevent using too much memory.
3547 */
3548 if (is_kdump_kernel()) {
3549 set->nr_hw_queues = 1;
Ming Lei59388702018-12-07 11:03:53 +08003550 set->nr_maps = 1;
Shaohua Li6637fad2014-11-30 16:00:58 -08003551 set->queue_depth = min(64U, set->queue_depth);
3552 }
Keith Busch868f2f02015-12-17 17:08:14 -07003553 /*
Jens Axboe392546a2018-10-29 13:25:27 -06003554 * There is no use for more h/w queues than cpus if we just have
3555 * a single map
Keith Busch868f2f02015-12-17 17:08:14 -07003556 */
Jens Axboe392546a2018-10-29 13:25:27 -06003557 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
Keith Busch868f2f02015-12-17 17:08:14 -07003558 set->nr_hw_queues = nr_cpu_ids;
Shaohua Li6637fad2014-11-30 16:00:58 -08003559
Minwoo Im91cdf262020-12-05 00:20:53 +09003560 if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
Jens Axboea5164402014-09-10 09:02:03 -06003561 return -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003562
Christoph Hellwigda695ba2016-09-14 16:18:55 +02003563 ret = -ENOMEM;
Jens Axboeb3c661b2018-10-30 10:36:06 -06003564 for (i = 0; i < set->nr_maps; i++) {
3565 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
Ming Lei07b35eb2018-12-17 18:42:45 +08003566 sizeof(set->map[i].mq_map[0]),
Jens Axboeb3c661b2018-10-30 10:36:06 -06003567 GFP_KERNEL, set->numa_node);
3568 if (!set->map[i].mq_map)
3569 goto out_free_mq_map;
Ming Lei59388702018-12-07 11:03:53 +08003570 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
Jens Axboeb3c661b2018-10-30 10:36:06 -06003571 }
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02003572
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06003573 ret = blk_mq_update_queue_map(set);
Christoph Hellwigda695ba2016-09-14 16:18:55 +02003574 if (ret)
3575 goto out_free_mq_map;
3576
John Garry63064be2021-10-05 18:23:35 +08003577 ret = blk_mq_alloc_set_map_and_rqs(set);
Christoph Hellwigda695ba2016-09-14 16:18:55 +02003578 if (ret)
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02003579 goto out_free_mq_map;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003580
Jens Axboe0d2602c2014-05-13 15:10:52 -06003581 mutex_init(&set->tag_list_lock);
3582 INIT_LIST_HEAD(&set->tag_list);
3583
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003584 return 0;
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02003585
3586out_free_mq_map:
Jens Axboeb3c661b2018-10-30 10:36:06 -06003587 for (i = 0; i < set->nr_maps; i++) {
3588 kfree(set->map[i].mq_map);
3589 set->map[i].mq_map = NULL;
3590 }
Robert Elliott5676e7b2014-09-02 11:38:44 -05003591 kfree(set->tags);
3592 set->tags = NULL;
Christoph Hellwigda695ba2016-09-14 16:18:55 +02003593 return ret;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003594}
3595EXPORT_SYMBOL(blk_mq_alloc_tag_set);
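#if 0
/*
 * Illustrative only, not part of this file: a hypothetical multi-queue
 * driver filling in a tag set before calling blk_mq_alloc_tag_set().
 * "example_mq_ops" is an assumption; the depth and flags are arbitrary.
 */
static int example_init_tag_set(struct blk_mq_tag_set *set)
{
	memset(set, 0, sizeof(*set));
	set->ops = &example_mq_ops;
	set->nr_hw_queues = num_possible_cpus();	/* capped to nr_cpu_ids above */
	set->nr_maps = 1;
	set->queue_depth = 256;				/* capped to BLK_MQ_MAX_DEPTH */
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	return blk_mq_alloc_tag_set(set);
}
#endif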
3596
Christoph Hellwigcdb14e02021-06-02 09:53:16 +03003597/* allocate and initialize a tagset for a simple single-queue device */
3598int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
3599 const struct blk_mq_ops *ops, unsigned int queue_depth,
3600 unsigned int set_flags)
3601{
3602 memset(set, 0, sizeof(*set));
3603 set->ops = ops;
3604 set->nr_hw_queues = 1;
3605 set->nr_maps = 1;
3606 set->queue_depth = queue_depth;
3607 set->numa_node = NUMA_NO_NODE;
3608 set->flags = set_flags;
3609 return blk_mq_alloc_tag_set(set);
3610}
3611EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
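#if 0
/*
 * Illustrative only, not part of this file: a minimal probe path for a
 * hypothetical single-queue driver, pairing blk_mq_alloc_sq_tag_set()
 * with blk_mq_alloc_disk().  "example_dev" and "example_mq_ops" are
 * assumptions.
 */
struct example_dev {
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*disk;
};

static int example_probe(struct example_dev *dev)
{
	int ret;

	/* 128 requests per queue; BLK_MQ_F_SHOULD_MERGE enables bio merging */
	ret = blk_mq_alloc_sq_tag_set(&dev->tag_set, &example_mq_ops, 128,
				      BLK_MQ_F_SHOULD_MERGE);
	if (ret)
		return ret;

	dev->disk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(dev->disk)) {
		ret = PTR_ERR(dev->disk);
		blk_mq_free_tag_set(&dev->tag_set);
	}
	return ret;
}
#endif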
3612
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003613void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3614{
Jens Axboeb3c661b2018-10-30 10:36:06 -06003615 int i, j;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003616
John Garrye155b0c2021-10-05 18:23:37 +08003617 for (i = 0; i < set->nr_hw_queues; i++)
3618 __blk_mq_free_map_and_rqs(set, i);
Jens Axboe484b4062014-05-21 14:01:15 -06003619
John Garry079a2e32021-10-05 18:23:39 +08003620 if (blk_mq_is_shared_tags(set->flags)) {
3621 blk_mq_free_map_and_rqs(set, set->shared_tags,
John Garrye155b0c2021-10-05 18:23:37 +08003622 BLK_MQ_NO_HCTX_IDX);
3623 }
John Garry32bc15a2020-08-19 23:20:24 +08003624
Jens Axboeb3c661b2018-10-30 10:36:06 -06003625 for (j = 0; j < set->nr_maps; j++) {
3626 kfree(set->map[j].mq_map);
3627 set->map[j].mq_map = NULL;
3628 }
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02003629
Ming Lei981bd182014-04-24 00:07:34 +08003630 kfree(set->tags);
Robert Elliott5676e7b2014-09-02 11:38:44 -05003631 set->tags = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06003632}
3633EXPORT_SYMBOL(blk_mq_free_tag_set);
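#if 0
/*
 * Illustrative teardown for the probe sketch above (assumed example_dev
 * from that sketch): every gendisk/request queue created from the tag set
 * must be torn down before blk_mq_free_tag_set() is called.
 */
static void example_remove(struct example_dev *dev)
{
	del_gendisk(dev->disk);
	blk_cleanup_disk(dev->disk);
	blk_mq_free_tag_set(&dev->tag_set);
}
#endif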
3634
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003635int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3636{
3637 struct blk_mq_tag_set *set = q->tag_set;
3638 struct blk_mq_hw_ctx *hctx;
3639 int i, ret;
3640
Jens Axboebd166ef2017-01-17 06:03:22 -07003641 if (!set)
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003642 return -EINVAL;
3643
Aleksei Zakharove5fa8142019-02-08 19:14:05 +03003644 if (q->nr_requests == nr)
3645 return 0;
3646
Jens Axboe70f36b62017-01-19 10:59:07 -07003647 blk_mq_freeze_queue(q);
Ming Lei24f5a902018-01-06 16:27:38 +08003648 blk_mq_quiesce_queue(q);
Jens Axboe70f36b62017-01-19 10:59:07 -07003649
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003650 ret = 0;
3651 queue_for_each_hw_ctx(q, hctx, i) {
Keith Busche9137d42016-02-18 14:56:35 -07003652 if (!hctx->tags)
3653 continue;
Jens Axboebd166ef2017-01-17 06:03:22 -07003654 /*
3655 * If we're using an MQ scheduler, just update the scheduler
3656 * queue depth. This is similar to what the old code would do.
3657 */
John Garryf6adcef2021-10-05 18:23:29 +08003658 if (hctx->sched_tags) {
Jens Axboe70f36b62017-01-19 10:59:07 -07003659 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
John Garryf6adcef2021-10-05 18:23:29 +08003660 nr, true);
John Garryf6adcef2021-10-05 18:23:29 +08003661 } else {
3662 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3663 false);
Jens Axboe70f36b62017-01-19 10:59:07 -07003664 }
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003665 if (ret)
3666 break;
Jens Axboe77f1e0a2019-01-18 10:34:16 -07003667 if (q->elevator && q->elevator->type->ops.depth_updated)
3668 q->elevator->type->ops.depth_updated(hctx);
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003669 }
John Garryd97e5942021-05-13 20:00:58 +08003670 if (!ret) {
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003671 q->nr_requests = nr;
John Garry079a2e32021-10-05 18:23:39 +08003672 if (blk_mq_is_shared_tags(set->flags)) {
John Garry8fa04462021-10-05 18:23:28 +08003673 if (q->elevator)
John Garry079a2e32021-10-05 18:23:39 +08003674 blk_mq_tag_update_sched_shared_tags(q);
John Garry8fa04462021-10-05 18:23:28 +08003675 else
John Garry079a2e32021-10-05 18:23:39 +08003676 blk_mq_tag_resize_shared_tags(set, nr);
John Garry8fa04462021-10-05 18:23:28 +08003677 }
John Garryd97e5942021-05-13 20:00:58 +08003678 }
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003679
Ming Lei24f5a902018-01-06 16:27:38 +08003680 blk_mq_unquiesce_queue(q);
Jens Axboe70f36b62017-01-19 10:59:07 -07003681 blk_mq_unfreeze_queue(q);
Jens Axboe70f36b62017-01-19 10:59:07 -07003682
Jens Axboee3a2b3f2014-05-20 11:49:02 -06003683 return ret;
3684}
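/*
 * Note (illustrative, an assumption about the usual call path): the queue
 * depth update above is normally reached from the block queue's sysfs
 * attribute, e.g.:
 *
 *	echo 128 > /sys/block/<dev>/queue/nr_requests
 */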
3685
Jianchao Wangd48ece22018-08-21 15:15:03 +08003686/*
3687 * request_queue and elevator_type pair.
3688 * It is just used by __blk_mq_update_nr_hw_queues to cache
3689 * the elevator_type associated with a request_queue.
3690 */
3691struct blk_mq_qe_pair {
3692 struct list_head node;
3693 struct request_queue *q;
3694 struct elevator_type *type;
3695};
3696
3697/*
3698 * Cache the elevator_type in the qe pair list and switch the
3699 * io scheduler to 'none'.
3700 */
3701static bool blk_mq_elv_switch_none(struct list_head *head,
3702 struct request_queue *q)
3703{
3704 struct blk_mq_qe_pair *qe;
3705
3706 if (!q->elevator)
3707 return true;
3708
3709 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3710 if (!qe)
3711 return false;
3712
3713 INIT_LIST_HEAD(&qe->node);
3714 qe->q = q;
3715 qe->type = q->elevator->type;
3716 list_add(&qe->node, head);
3717
3718 mutex_lock(&q->sysfs_lock);
3719 /*
3720 * After elevator_switch_mq, the previous elevator_queue will be
3721 * released by elevator_release. The reference to the io scheduler
3722 * module taken by elevator_get will also be dropped. So take an extra
3723 * reference to the io scheduler module here to prevent it from being
3724 * removed while the queue is temporarily running without an elevator.
3725 */
3726 __module_get(qe->type->elevator_owner);
3727 elevator_switch_mq(q, NULL);
3728 mutex_unlock(&q->sysfs_lock);
3729
3730 return true;
3731}
3732
3733static void blk_mq_elv_switch_back(struct list_head *head,
3734 struct request_queue *q)
3735{
3736 struct blk_mq_qe_pair *qe;
3737 struct elevator_type *t = NULL;
3738
3739 list_for_each_entry(qe, head, node)
3740 if (qe->q == q) {
3741 t = qe->type;
3742 break;
3743 }
3744
3745 if (!t)
3746 return;
3747
3748 list_del(&qe->node);
3749 kfree(qe);
3750
3751 mutex_lock(&q->sysfs_lock);
3752 elevator_switch_mq(q, t);
3753 mutex_unlock(&q->sysfs_lock);
3754}
3755
Keith Busche4dc2b32017-05-30 14:39:11 -04003756static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3757 int nr_hw_queues)
Keith Busch868f2f02015-12-17 17:08:14 -07003758{
3759 struct request_queue *q;
Jianchao Wangd48ece22018-08-21 15:15:03 +08003760 LIST_HEAD(head);
Jianchao Wange01ad462018-10-12 18:07:28 +08003761 int prev_nr_hw_queues;
Keith Busch868f2f02015-12-17 17:08:14 -07003762
Bart Van Assche705cda92017-04-07 11:16:49 -07003763 lockdep_assert_held(&set->tag_list_lock);
3764
Jens Axboe392546a2018-10-29 13:25:27 -06003765 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
Keith Busch868f2f02015-12-17 17:08:14 -07003766 nr_hw_queues = nr_cpu_ids;
Weiping Zhangfe35ec52020-06-17 14:18:37 +08003767 if (nr_hw_queues < 1)
3768 return;
3769 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
Keith Busch868f2f02015-12-17 17:08:14 -07003770 return;
3771
3772 list_for_each_entry(q, &set->tag_list, tag_set_list)
3773 blk_mq_freeze_queue(q);
Jianchao Wangd48ece22018-08-21 15:15:03 +08003774 /*
3775 * Switch IO scheduler to 'none', cleaning up the data associated
3776 * with the previous scheduler. We will switch back once we are done
3777 * updating the new sw to hw queue mappings.
3778 */
3779 list_for_each_entry(q, &set->tag_list, tag_set_list)
3780 if (!blk_mq_elv_switch_none(&head, q))
3781 goto switch_back;
Keith Busch868f2f02015-12-17 17:08:14 -07003782
Jianchao Wang477e19d2018-10-12 18:07:25 +08003783 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3784 blk_mq_debugfs_unregister_hctxs(q);
3785 blk_mq_sysfs_unregister(q);
3786 }
3787
Weiping Zhanga2584e42020-05-07 21:03:56 +08003788 prev_nr_hw_queues = set->nr_hw_queues;
Bart Van Asschef7e76db2019-10-25 09:50:10 -07003789 if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
3790 0)
3791 goto reregister;
3792
Keith Busch868f2f02015-12-17 17:08:14 -07003793 set->nr_hw_queues = nr_hw_queues;
Jianchao Wange01ad462018-10-12 18:07:28 +08003794fallback:
Weiping Zhangaa880ad2020-05-13 08:44:05 +08003795 blk_mq_update_queue_map(set);
Keith Busch868f2f02015-12-17 17:08:14 -07003796 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3797 blk_mq_realloc_hw_ctxs(set, q);
Jianchao Wange01ad462018-10-12 18:07:28 +08003798 if (q->nr_hw_queues != set->nr_hw_queues) {
3799 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
3800 nr_hw_queues, prev_nr_hw_queues);
3801 set->nr_hw_queues = prev_nr_hw_queues;
Dongli Zhang7d76f852019-02-27 21:35:01 +08003802 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
Jianchao Wange01ad462018-10-12 18:07:28 +08003803 goto fallback;
3804 }
Jianchao Wang477e19d2018-10-12 18:07:25 +08003805 blk_mq_map_swqueue(q);
3806 }
3807
Bart Van Asschef7e76db2019-10-25 09:50:10 -07003808reregister:
Jianchao Wang477e19d2018-10-12 18:07:25 +08003809 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3810 blk_mq_sysfs_register(q);
3811 blk_mq_debugfs_register_hctxs(q);
Keith Busch868f2f02015-12-17 17:08:14 -07003812 }
3813
Jianchao Wangd48ece22018-08-21 15:15:03 +08003814switch_back:
3815 list_for_each_entry(q, &set->tag_list, tag_set_list)
3816 blk_mq_elv_switch_back(&head, q);
3817
Keith Busch868f2f02015-12-17 17:08:14 -07003818 list_for_each_entry(q, &set->tag_list, tag_set_list)
3819 blk_mq_unfreeze_queue(q);
3820}
Keith Busche4dc2b32017-05-30 14:39:11 -04003821
3822void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3823{
3824 mutex_lock(&set->tag_list_lock);
3825 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3826 mutex_unlock(&set->tag_list_lock);
3827}
Keith Busch868f2f02015-12-17 17:08:14 -07003828EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
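#if 0
/*
 * Illustrative only: a hypothetical driver resizing its hardware queue
 * count from a reset path, e.g. after re-reading how many interrupt
 * vectors are usable.  example_dev and example_nr_usable_vectors() are
 * assumptions; blk_mq_update_nr_hw_queues() itself caps the value to
 * nr_cpu_ids for single-map sets.
 */
static void example_reconfigure_queues(struct example_dev *dev)
{
	unsigned int nr = example_nr_usable_vectors(dev);

	blk_mq_update_nr_hw_queues(&dev->tag_set, nr);
}
#endif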
3829
Omar Sandoval34dbad52017-03-21 08:56:08 -07003830/* Enable polling stats and return whether they were already enabled. */
3831static bool blk_poll_stats_enable(struct request_queue *q)
3832{
3833 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
Bart Van Assche7dfdbc72018-03-07 17:10:05 -08003834 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
Omar Sandoval34dbad52017-03-21 08:56:08 -07003835 return true;
3836 blk_stat_add_callback(q, q->poll_cb);
3837 return false;
3838}
3839
3840static void blk_mq_poll_stats_start(struct request_queue *q)
3841{
3842 /*
3843 * We don't arm the callback if polling stats are not enabled or the
3844 * callback is already active.
3845 */
3846 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3847 blk_stat_is_active(q->poll_cb))
3848 return;
3849
3850 blk_stat_activate_msecs(q->poll_cb, 100);
3851}
3852
3853static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3854{
3855 struct request_queue *q = cb->data;
Stephen Bates720b8cc2017-04-07 06:24:03 -06003856 int bucket;
Omar Sandoval34dbad52017-03-21 08:56:08 -07003857
Stephen Bates720b8cc2017-04-07 06:24:03 -06003858 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3859 if (cb->stat[bucket].nr_samples)
3860 q->poll_stat[bucket] = cb->stat[bucket];
3861 }
Omar Sandoval34dbad52017-03-21 08:56:08 -07003862}
3863
Jens Axboe64f1c212016-11-14 13:03:03 -07003864static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
Jens Axboe64f1c212016-11-14 13:03:03 -07003865 struct request *rq)
3866{
Jens Axboe64f1c212016-11-14 13:03:03 -07003867 unsigned long ret = 0;
Stephen Bates720b8cc2017-04-07 06:24:03 -06003868 int bucket;
Jens Axboe64f1c212016-11-14 13:03:03 -07003869
3870 /*
3871 * If stats collection isn't on, don't sleep but turn it on for
3872 * future users
3873 */
Omar Sandoval34dbad52017-03-21 08:56:08 -07003874 if (!blk_poll_stats_enable(q))
Jens Axboe64f1c212016-11-14 13:03:03 -07003875 return 0;
3876
3877 /*
Jens Axboe64f1c212016-11-14 13:03:03 -07003878 * As an optimistic guess, use half of the mean service time
3879 * for this type of request. We can (and should) make this smarter.
3880 * For instance, if the completion latencies are tight, we can
3881 * get closer than just half the mean. This is especially
3882 * important on devices where the completion latencies are longer
Stephen Bates720b8cc2017-04-07 06:24:03 -06003883 * than ~10 usec. We do use the stats for the relevant IO size
3884 * if available, which leads to better estimates.
Jens Axboe64f1c212016-11-14 13:03:03 -07003885 */
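	/*
	 * Example: a mean completion time of 20 usec in the matching bucket
	 * yields a ~10 usec sleep below before busy polling starts.
	 */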
Stephen Bates720b8cc2017-04-07 06:24:03 -06003886 bucket = blk_mq_poll_stats_bkt(rq);
3887 if (bucket < 0)
3888 return ret;
3889
3890 if (q->poll_stat[bucket].nr_samples)
3891 ret = (q->poll_stat[bucket].mean + 1) / 2;
Jens Axboe64f1c212016-11-14 13:03:03 -07003892
3893 return ret;
3894}
3895
Jens Axboe06426ad2016-11-14 13:01:59 -07003896static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3897 struct request *rq)
3898{
3899 struct hrtimer_sleeper hs;
3900 enum hrtimer_mode mode;
Jens Axboe64f1c212016-11-14 13:03:03 -07003901 unsigned int nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07003902 ktime_t kt;
3903
Jens Axboe76a86f92018-01-10 11:30:56 -07003904 if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
Jens Axboe64f1c212016-11-14 13:03:03 -07003905 return false;
3906
3907 /*
Jens Axboe1052b8a2018-11-26 08:21:49 -07003908 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
Jens Axboe64f1c212016-11-14 13:03:03 -07003909 *
Jens Axboe64f1c212016-11-14 13:03:03 -07003910 * 0: use half of prev avg
3911 * >0: use this specific value
3912 */
Jens Axboe1052b8a2018-11-26 08:21:49 -07003913 if (q->poll_nsec > 0)
Jens Axboe64f1c212016-11-14 13:03:03 -07003914 nsecs = q->poll_nsec;
3915 else
John Garrycae740a2020-02-26 20:10:15 +08003916 nsecs = blk_mq_poll_nsecs(q, rq);
Jens Axboe64f1c212016-11-14 13:03:03 -07003917
3918 if (!nsecs)
Jens Axboe06426ad2016-11-14 13:01:59 -07003919 return false;
3920
Jens Axboe76a86f92018-01-10 11:30:56 -07003921 rq->rq_flags |= RQF_MQ_POLL_SLEPT;
Jens Axboe06426ad2016-11-14 13:01:59 -07003922
3923 /*
3924 * Sleep for the pre-sleep target computed above: either half of the
3925 * mean completion time from the stats, or the fixed q->poll_nsec value.
3926 */
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01003927 kt = nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07003928
3929 mode = HRTIMER_MODE_REL;
Sebastian Andrzej Siewiordbc16252019-07-26 20:30:50 +02003930 hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
Jens Axboe06426ad2016-11-14 13:01:59 -07003931 hrtimer_set_expires(&hs.timer, kt);
3932
Jens Axboe06426ad2016-11-14 13:01:59 -07003933 do {
Tejun Heo5a61c362018-01-09 08:29:52 -08003934 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
Jens Axboe06426ad2016-11-14 13:01:59 -07003935 break;
3936 set_current_state(TASK_UNINTERRUPTIBLE);
Thomas Gleixner9dd88132019-07-30 21:16:55 +02003937 hrtimer_sleeper_start_expires(&hs, mode);
Jens Axboe06426ad2016-11-14 13:01:59 -07003938 if (hs.task)
3939 io_schedule();
3940 hrtimer_cancel(&hs.timer);
3941 mode = HRTIMER_MODE_ABS;
3942 } while (hs.task && !signal_pending(current));
3943
3944 __set_current_state(TASK_RUNNING);
3945 destroy_hrtimer_on_stack(&hs.timer);
3946 return true;
3947}
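/*
 * Note (summary of the logic above): q->poll_nsec selects the hybrid
 * behaviour: BLK_MQ_POLL_CLASSIC disables the sleep entirely, 0 uses the
 * automatic half-of-mean estimate from blk_mq_poll_nsecs(), and a
 * positive value requests a fixed sleep.  It is normally configured via
 * the queue's io_poll_delay sysfs attribute (the exact unit conversion
 * there is an assumption here; see queue_poll_delay_store() in
 * blk-sysfs.c).
 */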
3948
Jens Axboe1052b8a2018-11-26 08:21:49 -07003949static bool blk_mq_poll_hybrid(struct request_queue *q,
3950 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
Jens Axboebbd7bb72016-11-04 09:34:34 -06003951{
Jens Axboe1052b8a2018-11-26 08:21:49 -07003952 struct request *rq;
3953
Yufen Yu29ece8b2019-03-18 22:44:41 +08003954 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
Jens Axboe1052b8a2018-11-26 08:21:49 -07003955 return false;
3956
3957 if (!blk_qc_t_is_internal(cookie))
3958 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3959 else {
3960 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3961 /*
3962 * With scheduling, if the request has completed, we'll
3963 * get a NULL return here, as we clear the sched tag when
3964 * that happens. The request still remains valid, like always,
3965 * so we should be safe with just the NULL check.
3966 */
3967 if (!rq)
3968 return false;
3969 }
3970
John Garrycae740a2020-02-26 20:10:15 +08003971 return blk_mq_poll_hybrid_sleep(q, rq);
Jens Axboe1052b8a2018-11-26 08:21:49 -07003972}
3973
Christoph Hellwig529262d2018-12-02 17:46:26 +01003974/**
3975 * blk_poll - poll for IO completions
3976 * @q: the queue
3977 * @cookie: cookie passed back at IO submission time
3978 * @spin: whether to spin for completions
3979 *
3980 * Description:
3981 * Poll for completions on the passed in queue. Returns number of
3982 * completed entries found. If @spin is true, then blk_poll will continue
3983 * looping until at least one completion is found, unless the task is
3984 * otherwise marked running (or we need to reschedule).
3985 */
3986int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
Jens Axboe1052b8a2018-11-26 08:21:49 -07003987{
3988 struct blk_mq_hw_ctx *hctx;
Peter Zijlstra2f064a52021-06-11 10:28:17 +02003989 unsigned int state;
Jens Axboebbd7bb72016-11-04 09:34:34 -06003990
Christoph Hellwig529262d2018-12-02 17:46:26 +01003991 if (!blk_qc_t_valid(cookie) ||
3992 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
Jens Axboe1052b8a2018-11-26 08:21:49 -07003993 return 0;
3994
Christoph Hellwig529262d2018-12-02 17:46:26 +01003995 if (current->plug)
3996 blk_flush_plug_list(current->plug, false);
3997
Jens Axboe1052b8a2018-11-26 08:21:49 -07003998 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3999
Jens Axboe06426ad2016-11-14 13:01:59 -07004000 /*
4001 * If we sleep, have the caller restart the poll loop to reset
4002 * the state. Like for the other success return cases, the
4003 * caller is responsible for checking if the IO completed. If
4004 * the IO isn't complete, we'll get called again and will go
Pavel Begunkovf6f371f2020-12-06 14:04:39 +00004005 * straight to the busy poll loop. If specified not to spin,
4006 * we also should not sleep.
Jens Axboe06426ad2016-11-14 13:01:59 -07004007 */
Pavel Begunkovf6f371f2020-12-06 14:04:39 +00004008 if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
Jens Axboe85f4d4b2018-11-06 13:30:55 -07004009 return 1;
Jens Axboe06426ad2016-11-14 13:01:59 -07004010
Jens Axboebbd7bb72016-11-04 09:34:34 -06004011 hctx->poll_considered++;
4012
Peter Zijlstrad6c23bb2021-06-11 10:28:14 +02004013 state = get_current_state();
Jens Axboeaa61bec2018-11-13 21:32:10 -07004014 do {
Jens Axboebbd7bb72016-11-04 09:34:34 -06004015 int ret;
4016
4017 hctx->poll_invoked++;
4018
Jens Axboe97431392018-11-16 09:48:21 -07004019 ret = q->mq_ops->poll(hctx);
Jens Axboebbd7bb72016-11-04 09:34:34 -06004020 if (ret > 0) {
4021 hctx->poll_success++;
Jens Axboe849a3702018-11-16 08:37:34 -07004022 __set_current_state(TASK_RUNNING);
Jens Axboe85f4d4b2018-11-06 13:30:55 -07004023 return ret;
Jens Axboebbd7bb72016-11-04 09:34:34 -06004024 }
4025
4026 if (signal_pending_state(state, current))
Jens Axboe849a3702018-11-16 08:37:34 -07004027 __set_current_state(TASK_RUNNING);
Jens Axboebbd7bb72016-11-04 09:34:34 -06004028
Peter Zijlstrab03fbd42021-06-11 10:28:12 +02004029 if (task_is_running(current))
Jens Axboe85f4d4b2018-11-06 13:30:55 -07004030 return 1;
Jens Axboe0a1b8b82018-11-26 08:24:43 -07004031 if (ret < 0 || !spin)
Jens Axboebbd7bb72016-11-04 09:34:34 -06004032 break;
4033 cpu_relax();
Jens Axboeaa61bec2018-11-13 21:32:10 -07004034 } while (!need_resched());
Jens Axboebbd7bb72016-11-04 09:34:34 -06004035
Nitesh Shetty67b41102018-02-13 21:18:12 +05304036 __set_current_state(TASK_RUNNING);
Jens Axboe85f4d4b2018-11-06 13:30:55 -07004037 return 0;
Jens Axboebbd7bb72016-11-04 09:34:34 -06004038}
Christoph Hellwig529262d2018-12-02 17:46:26 +01004039EXPORT_SYMBOL_GPL(blk_poll);
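#if 0
/*
 * Illustrative only: the shape of a typical polling caller, such as the
 * direct-I/O paths.  The cookie is the blk_qc_t returned at submission
 * time; "done" is a hypothetical flag set by the bio's end_io handler.
 */
static void example_poll_until_done(struct request_queue *q, blk_qc_t cookie,
				    bool *done)
{
	while (!READ_ONCE(*done)) {
		/* spin=true: keep polling until at least one completion */
		if (blk_poll(q, cookie, true) > 0)
			continue;
		cond_resched();
	}
}
#endif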
Jens Axboebbd7bb72016-11-04 09:34:34 -06004040
Jens Axboe9cf2bab2018-10-31 17:01:22 -06004041unsigned int blk_mq_rq_cpu(struct request *rq)
4042{
4043 return rq->mq_ctx->cpu;
4044}
4045EXPORT_SYMBOL(blk_mq_rq_cpu);
4046
Jens Axboe320ae512013-10-24 09:20:05 +01004047static int __init blk_mq_init(void)
4048{
Christoph Hellwigc3077b52020-06-11 08:44:41 +02004049 int i;
4050
4051 for_each_possible_cpu(i)
Sebastian Andrzej Siewiorf9ab4912021-01-23 21:10:27 +01004052 init_llist_head(&per_cpu(blk_cpu_done, i));
Christoph Hellwigc3077b52020-06-11 08:44:41 +02004053 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4054
4055 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4056 "block/softirq:dead", NULL,
4057 blk_softirq_cpu_dead);
Thomas Gleixner9467f852016-09-22 08:05:17 -06004058 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4059 blk_mq_hctx_notify_dead);
Ming Leibf0beec2020-05-29 15:53:15 +02004060 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4061 blk_mq_hctx_notify_online,
4062 blk_mq_hctx_notify_offline);
Jens Axboe320ae512013-10-24 09:20:05 +01004063 return 0;
4064}
4065subsys_initcall(blk_mq_init);