/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
        int ddir, bytes, bucket;

        ddir = rq_data_dir(rq);
        bytes = blk_rq_bytes(rq);

        bucket = ddir + 2*(ilog2(bytes) - 9);

        if (bucket < 0)
                return -1;
        else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

        return bucket;
}
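
/*
 * Example of the bucketing above: a 4KB read has ddir == 0 and
 * ilog2(4096) == 12, so bucket = 0 + 2 * (12 - 9) = 6; the matching 4KB
 * write lands in bucket 7. Requests smaller than 512 bytes produce a
 * negative bucket and are excluded from the poll statistics.
 */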

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return !list_empty_careful(&hctx->dispatch) ||
                sbitmap_any_bit_set(&hctx->ctx_map) ||
                blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
                sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
        struct hd_struct *part;
        unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
                                  struct request *rq, void *priv,
                                  bool reserved)
{
        struct mq_inflight *mi = priv;

        if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
                /*
                 * index[0] counts the specific partition that was asked
                 * for. index[1] counts the ones that are active on the
                 * whole device, so increment that if mi->part is indeed
                 * a partition, and not a whole device.
                 */
                if (rq->part == mi->part)
                        mi->inflight[0]++;
                if (mi->part->partno)
                        mi->inflight[1]++;
        }
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
                      unsigned int inflight[2])
{
        struct mq_inflight mi = { .part = part, .inflight = inflight, };

        inflight[0] = inflight[1] = 0;
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}
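
/*
 * A sketch of the caller side (assumed for illustration; the actual
 * callers live in the partition statistics code, not in this file):
 *
 *	unsigned int inflight[2];
 *
 *	blk_mq_in_flight(q, part, inflight);
 *
 * Afterwards inflight[0] holds the count for @part and inflight[1] the
 * count for the whole device.
 */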

void blk_freeze_queue_start(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                if (q->mq_ops)
                        blk_mq_run_hw_queues(q, false);
        }
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                  percpu_ref_is_zero(&q->q_usage_counter),
                                  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero. For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
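
/*
 * Typical driver-side pairing (a sketch, not code from this file): a
 * driver that must update data that in-flight requests might observe
 * freezes the queue, makes the change, and unfreezes:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue or scheduler state ...
 *	blk_mq_unfreeze_queue(q);
 *
 * Freezing waits for all requests to drain; contrast with quiescing
 * below, which only stops new dispatch.
 */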

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_set(QUEUE_FLAG_QUIESCED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
        bool rcu = false;

        blk_mq_quiesce_queue_nowait(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(hctx->srcu);
                else
                        rcu = true;
        }
        if (rcu)
                synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* dispatch requests which are inserted during quiescing */
        blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
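
/*
 * Quiesce/unquiesce pairing as a caller would use it (a sketch under
 * the same assumptions as the freeze example above). Quiescing only
 * guarantees that no new ->queue_rq() invocations happen; it does not
 * wait for already dispatched requests to complete:
 *
 *	blk_mq_quiesce_queue(q);
 *	... reconfigure state that the dispatch path reads ...
 *	blk_mq_unquiesce_queue(q);
 */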

void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                unsigned int tag, unsigned int op)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct request *rq = tags->static_rqs[tag];
        req_flags_t rq_flags = 0;

        if (data->flags & BLK_MQ_REQ_INTERNAL) {
                rq->tag = -1;
                rq->internal_tag = tag;
        } else {
                if (blk_mq_tag_busy(data->hctx)) {
                        rq_flags = RQF_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
                }
                rq->tag = tag;
                rq->internal_tag = -1;
                data->hctx->tags->rqs[rq->tag] = rq;
        }

        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = data->q;
        rq->mq_ctx = data->ctx;
        rq->rq_flags = rq_flags;
        rq->cpu = -1;
        rq->cmd_flags = op;
        if (data->flags & BLK_MQ_REQ_PREEMPT)
                rq->rq_flags |= RQF_PREEMPT;
        if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->extra_len = 0;
        rq->__deadline = 0;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif

        data->ctx->rq_dispatched[op_is_sync(op)]++;
        return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
                struct bio *bio, unsigned int op,
                struct blk_mq_alloc_data *data)
{
        struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
        bool put_ctx_on_error = false;

        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx)) {
                data->ctx = blk_mq_get_ctx(q);
                put_ctx_on_error = true;
        }
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
        if (op & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;

        if (e) {
                data->flags |= BLK_MQ_REQ_INTERNAL;

                /*
                 * Flush requests are special and go directly to the
                 * dispatch list.
                 */
                if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
                        e->type->ops.mq.limit_depth(op, data);
        }

        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_TAG_FAIL) {
                if (put_ctx_on_error) {
                        blk_mq_put_ctx(data->ctx);
                        data->ctx = NULL;
                }
                blk_queue_exit(q);
                return NULL;
        }

        rq = blk_mq_rq_ctx_init(data, tag, op);
        if (!op_is_flush(op)) {
                rq->elv.icq = NULL;
                if (e && e->type->ops.mq.prepare_request) {
                        if (e->type->icq_cache && rq_ioc(bio))
                                blk_mq_sched_assign_ioc(rq, bio);

                        e->type->ops.mq.prepare_request(rq, bio);
                        rq->rq_flags |= RQF_ELVPRIV;
                }
        }
        data->hctx->queued++;
        return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                blk_mq_req_flags_t flags)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        int ret;

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        blk_mq_put_ctx(alloc_data.ctx);

        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
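
/*
 * A sketch of how a driver allocates and fires an internal request via
 * this interface (the opcode and end_io handler are assumptions made
 * for illustration):
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in the driver-specific payload ...
 *	blk_execute_rq_nowait(q, NULL, rq, false, my_end_io);
 */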

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        unsigned int cpu;
        int ret;

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context. No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not, tell the caller that it should skip this queue.
         */
        alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
                blk_queue_exit(q);
                return ERR_PTR(-EXDEV);
        }
        cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        const int sched_tag = rq->internal_tag;

        if (rq->rq_flags & RQF_ELVPRIV) {
                if (e && e->type->ops.mq.finish_request)
                        e->type->ops.mq.finish_request(rq);
                if (rq->elv.icq) {
                        put_io_context(rq->elv.icq->ioc);
                        rq->elv.icq = NULL;
                }
        }

        ctx->rq_completed[rq_is_sync(rq)]++;
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);

        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
                laptop_io_completion(q->backing_dev_info);

        wbt_done(q->rq_wb, &rq->issue_stat);

        if (blk_rq_rl(rq))
                blk_put_rl(blk_rq_rl(rq));

        blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                wbt_done(rq->q->rq_wb, &rq->issue_stat);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
        blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);

        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);
        if (rq->rq_flags & RQF_STATS) {
                blk_mq_poll_stats_start(rq->q);
                blk_stat_add(rq);
        }

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
        __releases(hctx->srcu)
{
        if (!(hctx->flags & BLK_MQ_F_BLOCKING))
                rcu_read_unlock();
        else
                srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
        __acquires(hctx->srcu)
{
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                /* shut up gcc false positive */
                *srcu_idx = 0;
                rcu_read_lock();
        } else
                *srcu_idx = srcu_read_lock(hctx->srcu);
}
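
/*
 * The pair above brackets both the issue and completion paths, e.g.
 * (a sketch of the calling pattern used later in this file):
 *
 *	int srcu_idx;
 *
 *	hctx_lock(hctx, &srcu_idx);
 *	... issue or complete requests ...
 *	hctx_unlock(hctx, srcu_idx);
 *
 * RCU is used for non-blocking hctxs and SRCU for BLK_MQ_F_BLOCKING
 * ones, which is why srcu_idx is only meaningful in the SRCU case.
 */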

static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
{
        unsigned long flags;

        /*
         * blk_mq_rq_aborted_gstate() is used from the completion path and
         * can thus be called from irq context. u64_stats_fetch in the
         * middle of update on the same CPU leads to lockup. Disable irq
         * while updating.
         */
        local_irq_save(flags);
        u64_stats_update_begin(&rq->aborted_gstate_sync);
        rq->aborted_gstate = gstate;
        u64_stats_update_end(&rq->aborted_gstate_sync);
        local_irq_restore(flags);
}

static u64 blk_mq_rq_aborted_gstate(struct request *rq)
{
        unsigned int start;
        u64 aborted_gstate;

        do {
                start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
                aborted_gstate = rq->aborted_gstate;
        } while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));

        return aborted_gstate;
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
        int srcu_idx;

        if (unlikely(blk_should_fake_timeout(q)))
                return;

        /*
         * If @rq->aborted_gstate equals the current instance, timeout is
         * claiming @rq and we lost. This is synchronized through
         * hctx_lock(). See blk_mq_timeout_work() for details.
         *
         * Completion path never blocks and we can directly use RCU here
         * instead of hctx_lock() which can be either RCU or SRCU.
         * However, that would complicate paths which want to synchronize
         * against us. Let's stay in sync with the issue path so that
         * hctx_lock() covers both issue and completion paths.
         */
        hctx_lock(hctx, &srcu_idx);
        if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
                __blk_mq_complete_request(rq);
        hctx_unlock(hctx, srcu_idx);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_sched_started_request(rq);

        trace_block_rq_issue(q, rq);

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
                rq->rq_flags |= RQF_STATS;
                wbt_issue(q->rq_wb, &rq->issue_stat);
        }

        WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

        /*
         * Mark @rq in-flight which also advances the generation number,
         * and register for timeout. Protect with a seqcount to allow the
         * timeout path to read both @rq->gstate and @rq->deadline
         * coherently.
         *
         * This is the only place where a request is marked in-flight. If
         * the timeout path reads an in-flight @rq->gstate, the
         * @rq->deadline it reads together under @rq->gstate_seq is
         * guaranteed to be the matching one.
         */
        preempt_disable();
        write_seqcount_begin(&rq->gstate_seq);

        blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
        blk_add_timer(rq);

        write_seqcount_end(&rq->gstate_seq);
        preempt_enable();

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears. We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * When we reach here because the queue is busy, it's safe to change the
 * state to IDLE without checking @rq->aborted_gstate because we should
 * still be holding the RCU read lock and thus protected against timeout.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_put_driver_tag(rq);

        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
        blk_mq_sched_requeue_request(rq);

        if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
                blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
        __blk_mq_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;

        spin_lock_irq(&q->requeue_lock);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irq(&q->requeue_lock);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->rq_flags & RQF_SOFTBARRIER))
                        continue;

                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, true, false, false, true);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, false, false, false, true);
        }

        blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
                                    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
        unsigned long next;
        unsigned int next_set;
        unsigned int nr_expired;
};

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
        const struct blk_mq_ops *ops = req->q->mq_ops;
        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

        req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;

        if (ops->timeout)
                ret = ops->timeout(req, reserved);

        switch (ret) {
        case BLK_EH_HANDLED:
                __blk_mq_complete_request(req);
                break;
        case BLK_EH_RESET_TIMER:
                /*
                 * As nothing prevents completions from happening while
                 * ->aborted_gstate is set, this may lead to ignored
                 * completions and further spurious timeouts.
                 */
                blk_mq_rq_update_aborted_gstate(req, 0);
                blk_add_timer(req);
                break;
        case BLK_EH_NOT_HANDLED:
                break;
        default:
                printk(KERN_ERR "block: bad eh return: %d\n", ret);
                break;
        }
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        struct blk_mq_timeout_data *data = priv;
        unsigned long gstate, deadline;
        int start;

        might_sleep();

        if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
                return;

        /* read coherent snapshots of @rq->gstate and @rq->deadline */
        while (true) {
                start = read_seqcount_begin(&rq->gstate_seq);
                gstate = READ_ONCE(rq->gstate);
                deadline = blk_rq_deadline(rq);
                if (!read_seqcount_retry(&rq->gstate_seq, start))
                        break;
                cond_resched();
        }

        /* if in-flight && overdue, mark for abortion */
        if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT &&
            time_after_eq(jiffies, deadline)) {
                blk_mq_rq_update_aborted_gstate(rq, gstate);
                data->nr_expired++;
                hctx->nr_expired++;
        } else if (!data->next_set || time_after(data->next, deadline)) {
                data->next = deadline;
                data->next_set = 1;
        }
}

static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        /*
         * We marked @rq->aborted_gstate and waited for RCU. If there were
         * completions that we lost to, they would have finished and
         * updated @rq->gstate by now; otherwise, the completion path is
         * now guaranteed to see @rq->aborted_gstate and yield. If
         * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
         */
        if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
            READ_ONCE(rq->gstate) == rq->aborted_gstate)
                blk_mq_rq_timed_out(rq, reserved);
}

static void blk_mq_timeout_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, timeout_work);
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
                .nr_expired     = 0,
        };
        struct blk_mq_hw_ctx *hctx;
        int i;

        /* A deadlock might occur if a request is stuck requiring a
         * timeout at the same time a queue freeze is waiting for
         * completion, since the timeout code would not be able to
         * acquire the queue reference here.
         *
         * That's why we don't use blk_queue_enter here; instead, we use
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
         * blk_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        /* scan for the expired ones and set their ->aborted_gstate */
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

        if (data.nr_expired) {
                bool has_rcu = false;

                /*
                 * Wait till everyone sees ->aborted_gstate. The
                 * sequential waits for SRCUs aren't ideal. If this ever
                 * becomes a problem, we can add per-hw_ctx rcu_head and
                 * wait in parallel.
                 */
                queue_for_each_hw_ctx(q, hctx, i) {
                        if (!hctx->nr_expired)
                                continue;

                        if (!(hctx->flags & BLK_MQ_F_BLOCKING))
                                has_rcu = true;
                        else
                                synchronize_srcu(hctx->srcu);

                        hctx->nr_expired = 0;
                }
                if (has_rcu)
                        synchronize_rcu();

                /* terminate the ones we won */
                blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL);
        }

        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
                /*
                 * Request timeouts are handled as a forward rolling timer. If
                 * we end up here it means that no requests are pending and
                 * also that no request has been pending for a while. Mark
                 * each hctx as idle.
                 */
                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
                                blk_mq_tag_idle(hctx);
                }
        }
        blk_queue_exit(q);
}

struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_busy_ctx_data *flush_data = data;
        struct blk_mq_hw_ctx *hctx = flush_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

        sbitmap_clear_bit(sb, bitnr);
        spin_lock(&ctx->lock);
        list_splice_tail_init(&ctx->rq_list, flush_data->list);
        spin_unlock(&ctx->lock);
        return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct flush_busy_ctx_data data = {
                .hctx = hctx,
                .list = list,
        };

        sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
                void *data)
{
        struct dispatch_rq_data *dispatch_data = data;
        struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

        spin_lock(&ctx->lock);
        if (unlikely(!list_empty(&ctx->rq_list))) {
                dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
                list_del_init(&dispatch_data->rq->queuelist);
                if (list_empty(&ctx->rq_list))
                        sbitmap_clear_bit(sb, bitnr);
        }
        spin_unlock(&ctx->lock);

        return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start)
{
        unsigned off = start ? start->index_hw : 0;
        struct dispatch_rq_data data = {
                .hctx = hctx,
                .rq = NULL,
        };

        __sbitmap_for_each_set(&hctx->ctx_map, off,
                               dispatch_rq_from_ctx, &data);

        return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
        if (!queued)
                return 0;

        return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
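
/*
 * Example of the indexing above: queued == 0 maps to slot 0, queued in
 * 1..3 maps to ilog2(queued) + 1 (slots 1 and 2), and anything from
 * 2^(BLK_MQ_MAX_DISPATCH_ORDER - 2) upwards is clamped into the last
 * slot, keeping hctx->dispatched[] a small log-scaled histogram.
 */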

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
                           bool wait)
{
        struct blk_mq_alloc_data data = {
                .q = rq->q,
                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
                .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
        };

        might_sleep_if(wait);

        if (rq->tag != -1)
                goto done;

        if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
                data.flags |= BLK_MQ_REQ_RESERVED;

        rq->tag = blk_mq_get_tag(&data);
        if (rq->tag >= 0) {
                if (blk_mq_tag_busy(data.hctx)) {
                        rq->rq_flags |= RQF_MQ_INFLIGHT;
                        atomic_inc(&data.hctx->nr_active);
                }
                data.hctx->tags->rqs[rq->tag] = rq;
        }

done:
        if (hctx)
                *hctx = data.hctx;
        return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
                                int flags, void *key)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

        list_del_init(&wait->entry);
        blk_mq_run_hw_queue(hctx, true);
        return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
                                 struct request *rq)
{
        struct blk_mq_hw_ctx *this_hctx = *hctx;
        struct sbq_wait_state *ws;
        wait_queue_entry_t *wait;
        bool ret;

        if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
                if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
                        set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);

                /*
                 * It's possible that a tag was freed in the window between the
                 * allocation failure and adding the hardware queue to the wait
                 * queue.
                 *
                 * Don't clear RESTART here, someone else could have set it.
                 * At most this will cost an extra queue run.
                 */
                return blk_mq_get_driver_tag(rq, hctx, false);
        }

        wait = &this_hctx->dispatch_wait;
        if (!list_empty_careful(&wait->entry))
                return false;

        spin_lock(&this_hctx->lock);
        if (!list_empty(&wait->entry)) {
                spin_unlock(&this_hctx->lock);
                return false;
        }

        ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
        add_wait_queue(&ws->wait, wait);

        /*
         * It's possible that a tag was freed in the window between the
         * allocation failure and adding the hardware queue to the wait
         * queue.
         */
        ret = blk_mq_get_driver_tag(rq, hctx, false);
        if (!ret) {
                spin_unlock(&this_hctx->lock);
                return false;
        }

        /*
         * We got a tag, remove ourselves from the wait queue to ensure
         * someone else gets the wakeup.
         */
        spin_lock_irq(&ws->wait.lock);
        list_del_init(&wait->entry);
        spin_unlock_irq(&ws->wait.lock);
        spin_unlock(&this_hctx->lock);

        return true;
}
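
/*
 * Note that the function above follows the classic prepare-to-wait,
 * recheck, cancel-if-satisfied pattern: the hctx is hooked onto the
 * sbitmap waitqueue *before* the tag allocation is retried, so a wakeup
 * arriving between the failed allocation and add_wait_queue() cannot be
 * lost; if the retry succeeds after all, the entry is removed again so
 * the wakeup goes to another waiter.
 */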
1162
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;
		blk_status_t ret;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
				if (got_budget)
					blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
			blk_mq_put_driver_tag(rq);
			break;
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it
			 * again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 */
		if (!blk_mq_sched_needs_restart(hctx) ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
	}

	return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 *
	 * There are at least two related races now between setting
	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
	 * __blk_mq_run_hw_queue():
	 *
	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
	 *   but later it becomes online; in that case this warning is
	 *   harmless
	 *
	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
	 *   but later it becomes offline; then the warning can't be
	 *   triggered, and we depend on the blk-mq timeout handler to
	 *   handle requests dispatched to this hctx
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
	    cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive" : "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with interrupts disabled. Ensure
	 * that we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;
select_cpu:
		next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first_and(hctx->cpumask,
					cpu_online_mask);

		/*
		 * If no online CPU is found, make sure hctx->next_cpu is
		 * still set correctly so as not to break the workqueue.
		 */
		if (next_cpu >= nr_cpu_ids)
			hctx->next_cpu = cpumask_first(hctx->cpumask);
		else
			hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do an unbound schedule if we can't find an online CPU for this
	 * hctx, which should only happen in the path handling CPU DEAD.
	 */
	if (!cpu_online(hctx->next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select the CPU next time, once the CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}
	return hctx->next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					 &hctx->run_work,
					 msecs_to_jiffies(msecs));
}

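/**
 * blk_mq_delay_run_hw_queue - run a hardware queue asynchronously after a delay
 * @hctx: pointer to the hardware queue to run.
 * @msecs: delay, in milliseconds, before the queue is run.
 */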
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

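/**
 * blk_mq_run_hw_queue - run a hardware queue if it has pending work
 * @hctx: pointer to the hardware queue.
 * @async: if false, the queue may be run synchronously on this CPU.
 *
 * Returns true if the queue was run, false if it was quiesced or idle.
 */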
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When the queue is quiesced, we may be switching the I/O
	 * scheduler, updating nr_hw_queues, or other such things, and the
	 * queue can't be run any more; even blk_mq_hctx_has_pending()
	 * can't be called safely.
	 *
	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run) {
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue. The exception is if
	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
	 * the STOPPED bit and run it.
	 */
	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
			return;

		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	}

	__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	/*
	 * Stop the hw queue, then modify currently delayed work.
	 * This should prevent us from running the queue prematurely.
	 * Mark the queue as auto-clearing STOPPED when it runs.
	 */
	blk_mq_stop_hw_queue(hctx);
	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
				    &hctx->run_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/*
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	spin_lock(&hctx->lock);
	list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
{
	/*
	 * Preemption doesn't flush the plug list, so it's possible that
	 * ctx->cpu is offline now.
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

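/*
 * Flush the plugged requests out to the elevator. The list is sorted so
 * that requests belonging to the same software queue end up adjacent and
 * can be inserted in one batch per ctx.
 */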
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				trace_block_unplug(this_q, depth, from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
								&ctx_list,
								from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
						from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));

	blk_account_io_start(rq, true);
}

static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_ctx *ctx,
				   struct request *rq)
{
	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, false);
	spin_unlock(&ctx->lock);
}

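/*
 * Encode the polling cookie returned to submitters: the (driver or
 * scheduler-internal) tag plus the hardware queue number, so a later
 * blk_mq_poll() can find the right queue to poll.
 */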
static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag != -1)
		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    blk_qc_t *cookie)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = true,
	};
	blk_qc_t new_cookie;
	blk_status_t ret;

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * For an OK return we are done. For an error, the caller may kill
	 * the request. Any other return (busy) means we requeue the
	 * request, as we previously would have done by adding it back to
	 * our list.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		*cookie = new_cookie;
		break;
	case BLK_STS_RESOURCE:
		__blk_mq_requeue_request(rq);
		break;
	default:
		*cookie = BLK_QC_T_NONE;
		break;
	}

	return ret;
}

static void __blk_mq_fallback_to_insert(struct blk_mq_hw_ctx *hctx,
					struct request *rq,
					bool run_queue, bool bypass_insert)
{
	if (!bypass_insert)
		blk_mq_sched_insert_request(rq, false, run_queue, false,
					    hctx->flags & BLK_MQ_F_BLOCKING);
	else
		blk_mq_request_bypass_insert(rq, run_queue);
}

static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
						bool bypass_insert)
{
	struct request_queue *q = rq->q;
	bool run_queue = true;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		goto insert;
	}

	if (q->elevator && !bypass_insert)
		goto insert;

	if (!blk_mq_get_driver_tag(rq, NULL, false))
		goto insert;

	if (!blk_mq_get_dispatch_budget(hctx)) {
		blk_mq_put_driver_tag(rq);
		goto insert;
	}

	return __blk_mq_issue_directly(hctx, rq, cookie);
insert:
	__blk_mq_fallback_to_insert(hctx, rq, run_queue, bypass_insert);
	if (bypass_insert)
		return BLK_STS_RESOURCE;

	return BLK_STS_OK;
}

static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, blk_qc_t *cookie)
{
	blk_status_t ret;
	int srcu_idx;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);

	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
	if (ret == BLK_STS_RESOURCE)
		__blk_mq_fallback_to_insert(hctx, rq, true, false);
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);

	hctx_unlock(hctx, srcu_idx);
}

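/*
 * Issue @rq directly to the driver, bypassing the I/O scheduler. If the
 * request can't be issued, it is requeued or put on the dispatch list and
 * BLK_STS_RESOURCE is returned so the caller knows it hasn't been started.
 */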
blk_status_t blk_mq_request_direct_issue(struct request *rq)
{
	blk_status_t ret;
	int srcu_idx;
	blk_qc_t unused_cookie;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	hctx_lock(hctx, &srcu_idx);
	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
	hctx_unlock(hctx, srcu_idx);

	return ret;
}

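/*
 * Entry point for all bios submitted to a blk-mq queue. After bounce
 * buffering, splitting and merge attempts, the new request takes one of
 * several paths: flush/FUA requests bypass the scheduler, plugged requests
 * are batched per task, sync requests on multi-queue devices are issued
 * directly, and everything else goes through the I/O scheduler (if any) or
 * straight onto the software queue.
 */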
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = op_is_sync(bio->bi_opf);
	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_mq_alloc_data data = { .flags = 0 };
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	blk_qc_t cookie;
	unsigned int wb_acct;

	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;

	if (blk_mq_sched_bio_merge(q, bio))
		return BLK_QC_T_NONE;

	wb_acct = wbt_wait(q->rq_wb, bio, NULL);

	trace_block_getrq(q, bio, bio->bi_opf);

	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
	if (unlikely(!rq)) {
		__wbt_done(q->rq_wb, wb_acct);
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		return BLK_QC_T_NONE;
	}

	wbt_track(&rq->issue_stat, wb_acct);

	cookie = request_to_qc_t(data.hctx, rq);

	plug = current->plug;
	if (unlikely(is_flush_fua)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/* bypass scheduler for flush rq */
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(data.hctx, true);
	} else if (plug && q->nr_hw_queues == 1) {
		struct request *last = NULL;

		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/*
		 * @request_count may become stale because of schedule
		 * out, so check the list again.
		 */
		if (list_empty(&plug->mq_list))
			request_count = 0;
		else if (blk_queue_nomerges(q))
			request_count = blk_plug_queued_count(q);

		if (!request_count)
			trace_block_plug(q);
		else
			last = list_entry_rq(plug->mq_list.prev);

		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
	} else if (plug && !blk_queue_nomerges(q)) {
		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued, so the plug list will have one request at most.
		 * The plug list might get flushed before this. If that
		 * happens, the plug list is empty, and same_queue_rq is
		 * invalid.
		 */
		if (list_empty(&plug->mq_list))
			same_queue_rq = NULL;
		if (same_queue_rq)
			list_del_init(&same_queue_rq->queuelist);
		list_add_tail(&rq->queuelist, &plug->mq_list);

		blk_mq_put_ctx(data.ctx);

		if (same_queue_rq) {
			data.hctx = blk_mq_map_queue(q,
					same_queue_rq->mq_ctx->cpu);
			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
					&cookie);
		}
	} else if (q->nr_hw_queues > 1 && is_sync) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else if (q->elevator) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, true, true);
	} else {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_queue_io(data.hctx, data.ctx, rq);
		blk_mq_run_hw_queue(data.hctx, true);
	}

	return cookie;
}

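/*
 * Free the preallocated requests for one tag map, calling the driver's
 * ->exit_request() hook for each one, then release the backing pages.
 * The inverse of blk_mq_alloc_rqs().
 */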
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove the kmemleak object previously allocated in
		 * blk_mq_alloc_rqs().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}

struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags)
{
	struct blk_mq_tags *tags;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

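/*
 * Initialize one preallocated request: run the driver's ->init_request()
 * hook, then set up the generation/state fields used by the timeout code.
 */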
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, int node)
{
	int ret;

	if (set->ops->init_request) {
		ret = set->ops->init_request(set, rq, hctx_idx, node);
		if (ret)
			return ret;
	}

	seqcount_init(&rq->gstate_seq);
	u64_stats_init(&rq->aborted_gstate_sync);
	return 0;
}

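/*
 * Preallocate the requests for one hardware queue. Requests are carved out
 * of higher-order pages (up to order 4), falling back to smaller orders on
 * allocation failure, so the tag map never needs one large physically
 * contiguous region.
 */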
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
				tags->static_rqs[i] = NULL;
				goto fail;
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}

/*
 * 'cpu' is going away. Splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_debugfs_unregister_hctx(hctx);

	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);

	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

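/*
 * Set up one hardware context: dispatch list, ctx map, dispatch waitqueue,
 * flush queue, and the driver's per-hctx state. The error unwind below
 * mirrors blk_mq_exit_hctx().
 */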
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
		goto exit_hctx;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto sched_exit_hctx;

	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
		goto free_fq;

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		init_srcu_struct(hctx->srcu);

	blk_mq_debugfs_register_hctx(q, hctx);

	return 0;

 free_fq:
	kfree(hctx->fq);
 sched_exit_hctx:
	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	sbitmap_free(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		hctx = blk_mq_map_queue(q, i);
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}

static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
{
	int ret = 0;

	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth, set->reserved_tags);
	if (!set->tags[hctx_idx])
		return false;

	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
				set->queue_depth);
	if (!ret)
		return true;

	blk_mq_free_rq_map(set->tags[hctx_idx]);
	set->tags[hctx_idx] = NULL;
	return false;
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	if (set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		set->tags[hctx_idx] = NULL;
	}
}

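/*
 * (Re)build the mapping from software queues to hardware queues. Every
 * possible CPU gets a ctx, and each ctx is attached to the hctx chosen by
 * q->mq_map; hardware queues that end up with no ctx mapped are disabled
 * and their requests freed.
 */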
static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i, hctx_idx;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading an incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues.
	 *
	 * If the cpu isn't present, the cpu is mapped to the first hctx.
	 */
	for_each_possible_cpu(i) {
		hctx_idx = q->mq_map[i];
		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[hctx_idx] &&
		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
			/*
			 * If tags initialization fails for some hctx,
			 * that hctx won't be brought online. In this
			 * case, remap the current ctx to hctx[0] which
			 * is guaranteed to always have tags allocated
			 */
			q->mq_map[i] = 0;
		}

		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = blk_mq_map_queue(q, i);

		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/*
			 * Never unmap queue 0. We need it as a
			 * fallback in case a new remap fails
			 * allocation.
			 */
			if (i && set->tags[i])
				blk_mq_free_map_and_requests(set, i);

			hctx->tags = NULL;
			continue;
		}

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = cpumask_first_and(hctx->cpumask,
						   cpu_online_mask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared) {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_inc(&q->shared_hctx_restart);
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		} else {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_dec(&q->shared_hctx_restart);
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
		}
	}
}

Jens Axboe8e8320c2017-06-20 17:56:13 -06002458static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2459 bool shared)
Jeff Moyer2404e602015-11-03 10:40:06 -05002460{
2461 struct request_queue *q;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002462
Bart Van Assche705cda92017-04-07 11:16:49 -07002463 lockdep_assert_held(&set->tag_list_lock);
2464
Jens Axboe0d2602c2014-05-13 15:10:52 -06002465 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2466 blk_mq_freeze_queue(q);
Jeff Moyer2404e602015-11-03 10:40:06 -05002467 queue_set_hctx_shared(q, shared);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002468 blk_mq_unfreeze_queue(q);
2469 }
2470}
2471
2472static void blk_mq_del_queue_tag_set(struct request_queue *q)
2473{
2474 struct blk_mq_tag_set *set = q->tag_set;
2475
Jens Axboe0d2602c2014-05-13 15:10:52 -06002476 mutex_lock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002477 list_del_rcu(&q->tag_set_list);
2478 INIT_LIST_HEAD(&q->tag_set_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002479 if (list_is_singular(&set->tag_list)) {
2480 /* just transitioned to unshared */
2481 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2482 /* update existing queue */
2483 blk_mq_update_tag_set_depth(set, false);
2484 }
Jens Axboe0d2602c2014-05-13 15:10:52 -06002485 mutex_unlock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002486
2487 synchronize_rcu();
Jens Axboe0d2602c2014-05-13 15:10:52 -06002488}
2489
2490static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2491 struct request_queue *q)
2492{
2493 q->tag_set = set;
2494
2495 mutex_lock(&set->tag_list_lock);
Jeff Moyer2404e602015-11-03 10:40:06 -05002496
Jens Axboeff821d22017-11-10 22:05:12 -07002497 /*
2498 * Check to see if we're transitioning to shared (from 1 to 2 queues).
2499 */
2500 if (!list_empty(&set->tag_list) &&
2501 !(set->flags & BLK_MQ_F_TAG_SHARED)) {
Jeff Moyer2404e602015-11-03 10:40:06 -05002502 set->flags |= BLK_MQ_F_TAG_SHARED;
2503 /* update existing queue */
2504 blk_mq_update_tag_set_depth(set, true);
2505 }
2506 if (set->flags & BLK_MQ_F_TAG_SHARED)
2507 queue_set_hctx_shared(q, true);
Bart Van Assche705cda92017-04-07 11:16:49 -07002508 list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002509
Jens Axboe0d2602c2014-05-13 15:10:52 -06002510 mutex_unlock(&set->tag_list_lock);
2511}
2512
Ming Leie09aae72015-01-29 20:17:27 +08002513/*
2514 * It is the actual release handler for mq, but we do it from
2515 * request queue's release handler for avoiding use-after-free
2516 * and headache because q->mq_kobj shouldn't have been introduced,
2517 * but we can't group ctx/kctx kobj without it.
2518 */
2519void blk_mq_release(struct request_queue *q)
2520{
2521 struct blk_mq_hw_ctx *hctx;
2522 unsigned int i;
2523
2524 /* hctx kobj stays in hctx */
Ming Leic3b4afc2015-06-04 22:25:04 +08002525 queue_for_each_hw_ctx(q, hctx, i) {
2526 if (!hctx)
2527 continue;
Ming Lei6c8b2322017-02-22 18:14:01 +08002528 kobject_put(&hctx->kobj);
Ming Leic3b4afc2015-06-04 22:25:04 +08002529 }
Ming Leie09aae72015-01-29 20:17:27 +08002530
Akinobu Mitaa723bab2015-09-27 02:09:21 +09002531 q->mq_map = NULL;
2532
Ming Leie09aae72015-01-29 20:17:27 +08002533 kfree(q->queue_hw_ctx);
2534
Ming Lei7ea5fe32017-02-22 18:14:00 +08002535 /*
2536 * release .mq_kobj and sw queue's kobject now because
2537 * both share lifetime with request queue.
2538 */
2539 blk_mq_sysfs_deinit(q);
2540
Ming Leie09aae72015-01-29 20:17:27 +08002541 free_percpu(q->queue_ctx);
2542}
2543
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002544struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01002545{
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002546 struct request_queue *uninit_q, *q;
2547
2548 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2549 if (!uninit_q)
2550 return ERR_PTR(-ENOMEM);
2551
2552 q = blk_mq_init_allocated_queue(set, uninit_q);
2553 if (IS_ERR(q))
2554 blk_cleanup_queue(uninit_q);
2555
2556 return q;
2557}
2558EXPORT_SYMBOL(blk_mq_init_queue);
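
/*
 * Illustrative sketch (not part of blk-mq): the calling convention a
 * driver follows around blk_mq_init_queue(). Failure is reported via
 * ERR_PTR(), and the tag set passed in remains the caller's to free.
 * example_create_queue() is a hypothetical name.
 */
static struct request_queue *example_create_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		pr_err("example: queue init failed (%ld)\n", PTR_ERR(q));
		return NULL;
	}

	/* Teardown is blk_cleanup_queue(q) before freeing the tag set. */
	return q;
}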

static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	if (tag_set->flags & BLK_MQ_F_BLOCKING)
		hw_ctx_size += sizeof(struct srcu_struct);

	return hw_ctx_size;
}
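
/*
 * The sizing above relies on a common kernel pattern: a struct whose
 * final member is a zero-length array, with the allocation enlarged
 * only when that member is actually needed (blk_mq_hw_ctx does this
 * with its trailing srcu member). A self-contained sketch of the same
 * idea, with hypothetical example_* names:
 */
struct example_ctx {
	int core_state;
	struct srcu_struct srcu[0];	/* present only for blocking users */
};

static struct example_ctx *example_ctx_alloc(bool blocking)
{
	size_t size = sizeof(struct example_ctx);

	/* Pay for the trailing member only when the caller needs it. */
	if (blocking)
		size += sizeof(struct srcu_struct);

	return kzalloc(size, GFP_KERNEL);
}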

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);

	/* protect against switching io scheduler */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
					     node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags)
				blk_mq_free_map_and_requests(set, j);
			blk_mq_exit_hctx(q, set, hctx, j);
			kobject_put(&hctx->kobj);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	mutex_unlock(&q->sysfs_lock);
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
				       GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);
	if (q->mq_ops->poll)
		q->poll_fn = blk_mq_poll;

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling.
	 */
	q->poll_nsec = -1;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);

	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
		int ret;

		ret = blk_mq_sched_init(q);
		if (ret)
			return ERR_PTR(ret);
	}

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	blk_mq_del_queue_tag_set(q);
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}

/* Basically redo blk_mq_init_queue with the queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_debugfs_unregister_hctxs(q);
	blk_mq_sysfs_unregister(q);

	/*
	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory, worth doing?)
	 */
	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
	blk_mq_debugfs_register_hctxs(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++)
		if (!__blk_mq_alloc_rq_map(set, i))
			goto out_unwind;

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set->tags[i]);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	if (set->ops->map_queues) {
		int cpu;
		/*
		 * A transport's .map_queues callback usually works in
		 * the following way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 *	mask = get_cpu_mask(queue)
		 *	for_each_cpu(cpu, mask)
		 *		set->mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared to
		 * kill stale mappings, since one CPU may not be mapped
		 * to any hw queue.
		 */
		for_each_possible_cpu(cpu)
			set->mq_map[cpu] = 0;

		return set->ops->map_queues(set);
	} else
		return blk_mq_map_queues(set);
}
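
/*
 * Illustrative sketch (not part of blk-mq): a transport .map_queues
 * callback of the shape described in the comment above, spreading all
 * possible CPUs across the hardware queues round-robin.
 * example_map_queues() is hypothetical; real transports typically
 * derive the mapping from IRQ affinity instead.
 */
static int example_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int cpu;

	/* Every possible CPU gets a queue; no entry is left stale. */
	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = cpu % set->nr_hw_queues;

	return 0;
}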

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
				   GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
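
/*
 * Illustrative sketch (not part of blk-mq): the minimal tag set a
 * driver might hand to blk_mq_alloc_tag_set(). Only .queue_rq is
 * mandatory, as the checks above enforce. example_queue_rq(),
 * example_mq_ops and example_setup_tag_set() are hypothetical; a real
 * .queue_rq would issue the request to hardware instead of completing
 * it in-line.
 */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);
	blk_mq_end_request(bd->rq, BLK_STS_OK);	/* complete immediately */
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};

static int example_setup_tag_set(struct blk_mq_tag_set *set)
{
	memset(set, 0, sizeof(*set));
	set->ops		= &example_mq_ops;
	set->nr_hw_queues	= 1;
	set->queue_depth	= 64;
	set->numa_node		= NUMA_NO_NODE;
	set->flags		= BLK_MQ_F_SHOULD_MERGE;

	/* On success, a queue can be created as in example_create_queue(). */
	return blk_mq_alloc_tag_set(set);
}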

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		blk_mq_free_map_and_requests(set, i);

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (!hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		}
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
					 int nr_hw_queues)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_queue_reinit(q);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
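
/*
 * Illustrative sketch (not part of blk-mq): a driver resizing its
 * hardware queue count after a reconfiguration event, e.g. a reset
 * that changed the number of usable interrupt vectors.
 * example_reset_done() and new_nr_vecs are hypothetical.
 */
static void example_reset_done(struct blk_mq_tag_set *set, int new_nr_vecs)
{
	/* Clamped to nr_cpu_ids internally; a no-op if the count is unchanged. */
	blk_mq_update_nr_hw_queues(set, new_nr_vecs);
}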

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users.
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size,
	 * if available, which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
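
/*
 * Worked example (illustrative numbers): with a tracked mean completion
 * time of 20000ns for a request's bucket, blk_mq_poll_nsecs() returns
 * (20000 + 1) / 2 = 10000, so the hybrid path below sleeps for ~10us
 * before the poll loop resumes checking for completion.
 */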

static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
{
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
		return false;

	/*
	 * poll_nsec can be:
	 *
	 * -1:	don't ever hybrid sleep
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec == -1)
		return false;
	else if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, hctx, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	hrtimer_init_sleeper(&hs, current);
	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}

static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct request_queue *q = hctx->queue;
	long state;

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
		return true;

	hctx->poll_considered++;

	state = current->state;
	while (!need_resched()) {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx, rq->tag);
		if (ret > 0) {
			hctx->poll_success++;
			set_current_state(TASK_RUNNING);
			return true;
		}

		if (signal_pending_state(state, current))
			set_current_state(TASK_RUNNING);

		if (current->state == TASK_RUNNING)
			return true;
		if (ret < 0)
			break;
		cpu_relax();
	}

	return false;
}

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return false;

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return __blk_mq_poll(hctx, rq);
}

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}
subsys_initcall(blk_mq_init);