/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

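/*
 * Map a request to a polling statistics bucket: even buckets hold reads,
 * odd buckets hold writes, grouped by the log2 of the request size
 * starting at 512 bytes.
 */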
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return sbitmap_any_bit_set(&hctx->ctx_map) ||
			!list_empty_careful(&hctx->dispatch) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

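/*
 * Increase the queue freeze depth. The first freezer kills q_usage_counter,
 * so new callers of blk_queue_enter() block, and kicks the hardware queues
 * so that already-queued work can drain.
 */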
void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, no dispatch
 * can happen until the queue is unquiesced via blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

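/*
 * Initialize a request taken from the preallocated (static) request pool.
 * With a scheduler attached the tag passed in is an internal scheduler tag
 * and the driver tag is assigned later; otherwise it is the driver tag.
 */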
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];

	rq->rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			rq->rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->cmd_flags = op;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	return rq;
}

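/*
 * Common allocation path: take a queue reference, pick the software and
 * hardware contexts, let the elevator limit the depth for non-flush
 * requests, then grab a tag and initialize the request.
 */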
static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx))
		data->ctx = blk_mq_get_ctx(q);
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
			e->type->ops.mq.limit_depth(op, data);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		unsigned int flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first(alloc_data.hctx->cpumask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

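/*
 * Release a request: let the elevator finish it, drop the driver and
 * scheduler tags, and release the queue reference taken at allocation.
 */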
void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	wbt_done(q->rq_wb, &rq->issue_stat);

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, &rq->issue_stat);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

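/*
 * Run the completion (softirq done) callback, either on the local CPU or,
 * depending on QUEUE_FLAG_SAME_COMP/SAME_FORCE and cache sharing, via an
 * IPI to the CPU the request was submitted on.
 */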
static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq);
	}

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

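/*
 * Hand a request to the driver: record issue statistics, arm the timeout
 * timer, and mark the request as started so the timeout handler treats it
 * as in flight.
 */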
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
 * flag isn't set yet, so there may be a race with the timeout handler.
 * But since rq->deadline has just been set in .queue_rq() in this
 * situation, the race cannot happen in practice: rq->timeout is large
 * enough to cover the window between blk_mq_start_request() being
 * called from .queue_rq() and REQ_ATOM_STARTED being cleared here.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);
	blk_mq_sched_requeue_request(rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

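/*
 * Work handler that drains the requeue list: requests marked
 * RQF_SOFTBARRIER are reinserted at the head of the scheduler queue, the
 * rest at the tail, and the hardware queues are then kicked.
 */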
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false, true);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false, true);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_schedule_delayed_work(&q->requeue_work,
				      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return;

	/*
	 * The rq being checked may have been freed and reallocated
	 * already; we avoid this race by checking rq->deadline and the
	 * REQ_ATOM_COMPLETE flag together:
	 *
	 * - if rq->deadline is observed as the new value because of
	 *   reuse, the rq won't be timed out because of timing.
	 * - if rq->deadline is observed as the previous value, the
	 *   REQ_ATOM_COMPLETE flag won't be cleared in the reuse path,
	 *   because we put a barrier between setting rq->deadline and
	 *   clearing the flag in blk_mq_start_request(), so this rq
	 *   won't be timed out either.
	 */
	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

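/*
 * Timeout work handler: walk all started requests, expire those whose
 * deadline has passed, and re-arm the timer for the nearest remaining
 * deadline (or idle the tags if nothing is pending).
 */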
static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

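/*
 * Allocate a driver tag for @rq if it does not already have one. Returns
 * true if the request now owns a driver tag; with wait == false the
 * attempt does not block.
 */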
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}

static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
				    struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

/*
 * If we fail getting a driver tag because all the driver tags are already
 * assigned and on the dispatch list, BUT the first entry does not have a
 * tag, then we could deadlock. For that case, move entries with assigned
 * driver tags to the front, leaving the set of tagged requests in the
 * same order, and the untagged set in the same order.
 */
static bool reorder_tags_to_front(struct list_head *list)
{
	struct request *rq, *tmp, *first = NULL;

	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
		if (rq == first)
			break;
		if (rq->tag != -1) {
			list_move(&rq->queuelist, list);
			if (!first)
				first = rq;
		}
	}

	return first != NULL;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
				void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del(&wait->entry);
	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws;

	/*
	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
	 * The thread which wins the race to grab this bit adds the hardware
	 * queue to the wait queue.
	 */
	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
		return false;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);

	/*
	 * As soon as this returns, it's no longer safe to fiddle with
	 * hctx->dispatch_wait, since a completion can wake up the wait queue
	 * and unlock the bit.
	 */
	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
	return true;
}

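/*
 * Dispatch the requests on @list to the driver. Requests that cannot be
 * issued right now (no driver tag, or the driver returned
 * BLK_STS_RESOURCE) are put back on hctx->dispatch for a later run.
 * Returns true if any progress was made.
 */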
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	int errors, queued;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;
		blk_status_t ret;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			if (!queued && reorder_tags_to_front(list))
				continue;

			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed.
			 */
			if (!blk_mq_dispatch_wait_add(hctx))
				break;

			/*
			 * It's possible that a tag was freed in the window
			 * between the allocation failure and adding the
			 * hardware queue to the wait queue.
			 */
			if (!blk_mq_get_driver_tag(rq, &hctx, false))
				break;
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			struct request *nxt;

			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE) {
			blk_mq_put_driver_tag_hctx(hctx, rq);
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		/*
		 * If an I/O scheduler has been configured and we got a driver
		 * tag for the next request already, free it again.
		 */
		rq = list_first_entry(list, struct request, queuelist);
		blk_mq_put_driver_tag(rq);

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If TAG_WAITING is set that means that an I/O scheduler has
		 * been configured and another thread is waiting for a driver
		 * tag. To guarantee fairness, do not rerun this hardware queue
		 * but let the other thread grab the driver tag.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 */
		if (!blk_mq_sched_needs_restart(hctx) &&
		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
			blk_mq_run_hw_queue(hctx, true);
	}

	return (queued + errors) != 0;
}

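/*
 * Run the dispatch under RCU (or SRCU for BLK_MQ_F_BLOCKING hardware
 * queues) so that blk_mq_quiesce_queue() can wait for in-flight
 * dispatches to finish.
 */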
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_sched_dispatch_requests(hctx);
		rcu_read_unlock();
	} else {
		might_sleep();

		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
		blk_mq_sched_dispatch_requests(hctx);
		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return hctx->next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					 &hctx->run_work,
					 msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!blk_mq_hctx_has_pending(hctx) ||
		    blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which
 * case BLK_MQ_RQ_QUEUE_BUSY is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which
 * case BLK_MQ_RQ_QUEUE_BUSY is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

Jens Axboe320ae512013-10-24 09:20:05 +01001250void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1251{
1252 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboee4043dc2014-04-09 10:18:23 -06001253
Jens Axboe0ffbce82014-06-25 08:22:34 -06001254 blk_mq_run_hw_queue(hctx, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001255}
1256EXPORT_SYMBOL(blk_mq_start_hw_queue);
1257
Christoph Hellwig2f268552014-04-16 09:44:56 +02001258void blk_mq_start_hw_queues(struct request_queue *q)
1259{
1260 struct blk_mq_hw_ctx *hctx;
1261 int i;
1262
1263 queue_for_each_hw_ctx(q, hctx, i)
1264 blk_mq_start_hw_queue(hctx);
1265}
1266EXPORT_SYMBOL(blk_mq_start_hw_queues);
1267
Jens Axboeae911c52016-12-08 13:19:30 -07001268void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1269{
1270 if (!blk_mq_hctx_stopped(hctx))
1271 return;
1272
1273 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1274 blk_mq_run_hw_queue(hctx, async);
1275}
1276EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1277
Christoph Hellwig1b4a3252014-04-16 09:44:54 +02001278void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001279{
1280 struct blk_mq_hw_ctx *hctx;
1281 int i;
1282
Jens Axboeae911c52016-12-08 13:19:30 -07001283 queue_for_each_hw_ctx(q, hctx, i)
1284 blk_mq_start_stopped_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001285}
1286EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1287
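/*
 * Illustrative sketch, not part of blk-mq itself: the stop/start helpers
 * above are typically paired between a driver's ->queue_rq() and its
 * completion path.  my_hw_full() and my_completion_irq() are hypothetical
 * names; the blk_mq_* calls and BLK_STS_* values are the real API.
 */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	if (my_hw_full(hctx)) {
		/* Pause dispatch; blk-mq keeps the request for later. */
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_RESOURCE;
	}
	/* ... hand bd->rq to the hardware ... */
	return BLK_STS_OK;
}

static void my_completion_irq(struct request_queue *q)
{
	/* A slot freed up: restart any queues stopped above. */
	blk_mq_start_stopped_hw_queues(q, true);
}
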
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001288static void blk_mq_run_work_fn(struct work_struct *work)
Jens Axboe320ae512013-10-24 09:20:05 +01001289{
1290 struct blk_mq_hw_ctx *hctx;
1291
Jens Axboe9f993732017-04-10 09:54:54 -06001292 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
Jens Axboe21c6e932017-04-10 09:54:56 -06001293
1294 /*
1295 * If we are stopped, don't run the queue. The exception is if
1296 * BLK_MQ_S_START_ON_RUN is set. In that case, we clear both
1297 * the START_ON_RUN and STOPPED bits and run the queue.
1298 */
1299 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1300 if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1301 return;
1302
1303 clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1304 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1305 }
Jens Axboee4043dc2014-04-09 10:18:23 -06001306
Jens Axboe320ae512013-10-24 09:20:05 +01001307 __blk_mq_run_hw_queue(hctx);
1308}
1309
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001310
1311void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1312{
Bart Van Assche5435c022017-06-20 11:15:49 -07001313 if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
Ming Lei19c66e52014-12-03 19:38:04 +08001314 return;
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001315
Jens Axboe21c6e932017-04-10 09:54:56 -06001316 /*
1317 * Stop the hw queue, then modify the currently delayed work.
1318 * This should prevent us from running the queue prematurely.
1319 * Mark the queue as auto-clearing STOPPED when it runs.
1320 */
Jens Axboe7e79dad2017-01-19 07:58:59 -07001321 blk_mq_stop_hw_queue(hctx);
Jens Axboe21c6e932017-04-10 09:54:56 -06001322 set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1323 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1324 &hctx->run_work,
1325 msecs_to_jiffies(msecs));
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001326}
1327EXPORT_SYMBOL(blk_mq_delay_queue);
1328
Ming Leicfd0c552015-10-20 23:13:57 +08001329static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
Ming Leicfd0c552015-10-20 23:13:57 +08001330 struct request *rq,
1331 bool at_head)
Jens Axboe320ae512013-10-24 09:20:05 +01001332{
Jens Axboee57690f2016-08-24 15:34:35 -06001333 struct blk_mq_ctx *ctx = rq->mq_ctx;
1334
Bart Van Assche7b607812017-06-20 11:15:47 -07001335 lockdep_assert_held(&ctx->lock);
1336
Jens Axboe01b983c2013-11-19 18:59:10 -07001337 trace_block_rq_insert(hctx->queue, rq);
1338
Christoph Hellwig72a0a362014-02-07 10:22:36 -08001339 if (at_head)
1340 list_add(&rq->queuelist, &ctx->rq_list);
1341 else
1342 list_add_tail(&rq->queuelist, &ctx->rq_list);
Ming Leicfd0c552015-10-20 23:13:57 +08001343}
Jens Axboe4bb659b2014-05-09 09:36:49 -06001344
Jens Axboe2c3ad662016-12-14 14:34:47 -07001345void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1346 bool at_head)
Ming Leicfd0c552015-10-20 23:13:57 +08001347{
1348 struct blk_mq_ctx *ctx = rq->mq_ctx;
1349
Bart Van Assche7b607812017-06-20 11:15:47 -07001350 lockdep_assert_held(&ctx->lock);
1351
Jens Axboee57690f2016-08-24 15:34:35 -06001352 __blk_mq_insert_req_list(hctx, rq, at_head);
Jens Axboe320ae512013-10-24 09:20:05 +01001353 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001354}
1355
Jens Axboebd166ef2017-01-17 06:03:22 -07001356void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1357 struct list_head *list)
Jens Axboe320ae512013-10-24 09:20:05 +01001358
1359{
Jens Axboe320ae512013-10-24 09:20:05 +01001360 /*
1361 * Preemption doesn't flush the plug list, so it's possible that
1362 * ctx->cpu is offline by now.
1363 */
1364 spin_lock(&ctx->lock);
1365 while (!list_empty(list)) {
1366 struct request *rq;
1367
1368 rq = list_first_entry(list, struct request, queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001369 BUG_ON(rq->mq_ctx != ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001370 list_del_init(&rq->queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001371 __blk_mq_insert_req_list(hctx, rq, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001372 }
Ming Leicfd0c552015-10-20 23:13:57 +08001373 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001374 spin_unlock(&ctx->lock);
Jens Axboe320ae512013-10-24 09:20:05 +01001375}
1376
1377static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1378{
1379 struct request *rqa = container_of(a, struct request, queuelist);
1380 struct request *rqb = container_of(b, struct request, queuelist);
1381
1382 return !(rqa->mq_ctx < rqb->mq_ctx ||
1383 (rqa->mq_ctx == rqb->mq_ctx &&
1384 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1385}
1386
1387void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1388{
1389 struct blk_mq_ctx *this_ctx;
1390 struct request_queue *this_q;
1391 struct request *rq;
1392 LIST_HEAD(list);
1393 LIST_HEAD(ctx_list);
1394 unsigned int depth;
1395
1396 list_splice_init(&plug->mq_list, &list);
1397
1398 list_sort(NULL, &list, plug_ctx_cmp);
1399
1400 this_q = NULL;
1401 this_ctx = NULL;
1402 depth = 0;
1403
1404 while (!list_empty(&list)) {
1405 rq = list_entry_rq(list.next);
1406 list_del_init(&rq->queuelist);
1407 BUG_ON(!rq->q);
1408 if (rq->mq_ctx != this_ctx) {
1409 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001410 trace_block_unplug(this_q, depth, from_schedule);
1411 blk_mq_sched_insert_requests(this_q, this_ctx,
1412 &ctx_list,
1413 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001414 }
1415
1416 this_ctx = rq->mq_ctx;
1417 this_q = rq->q;
1418 depth = 0;
1419 }
1420
1421 depth++;
1422 list_add_tail(&rq->queuelist, &ctx_list);
1423 }
1424
1425 /*
1426 * If 'this_ctx' is set, we know we have entries to complete
1427 * on 'ctx_list'. Do those.
1428 */
1429 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001430 trace_block_unplug(this_q, depth, from_schedule);
1431 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1432 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001433 }
1434}
1435
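/*
 * Illustrative sketch, not part of blk-mq itself: the mq_list drained by
 * blk_mq_flush_plug_list() above is filled by submitters that batch bios
 * under a plug.  submit_batch() is a hypothetical caller; blk_start_plug(),
 * submit_bio() and blk_finish_plug() are the real API.
 */
static void submit_batch(struct bio_list *bios)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(bios)))
		submit_bio(bio);	/* requests may be held on plug.mq_list */
	blk_finish_plug(&plug);		/* flushes via blk_mq_flush_plug_list() */
}
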
1436static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1437{
Bart Van Asscheda8d7f02017-04-19 14:01:24 -07001438 blk_init_request_from_bio(rq, bio);
Jens Axboe4b570522014-05-29 11:00:11 -06001439
Jens Axboe6e85eaf2016-12-02 20:00:14 -07001440 blk_account_io_start(rq, true);
Jens Axboe320ae512013-10-24 09:20:05 +01001441}
1442
Jens Axboe274a5842014-08-15 12:44:08 -06001443static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1444{
1445 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1446 !blk_queue_nomerges(hctx->queue);
1447}
1448
Ming Leiab42f352017-05-26 19:53:19 +08001449static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1450 struct blk_mq_ctx *ctx,
1451 struct request *rq)
1452{
1453 spin_lock(&ctx->lock);
1454 __blk_mq_insert_request(hctx, rq, false);
1455 spin_unlock(&ctx->lock);
Jens Axboe07068d52014-05-22 10:40:51 -06001456}
1457
Jens Axboefd2d3322017-01-12 10:04:45 -07001458static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1459{
Jens Axboebd166ef2017-01-17 06:03:22 -07001460 if (rq->tag != -1)
1461 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1462
1463 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
Jens Axboefd2d3322017-01-12 10:04:45 -07001464}
1465
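/*
 * Illustrative note, not part of blk-mq itself: the cookie built above is
 * returned from blk_mq_make_request() so that polling callers can later
 * tell which hardware queue, and which (driver or scheduler-internal) tag,
 * to look at.  A sketch of the decode side, assuming the blk_qc_t helpers
 * from <linux/blk_types.h>:
 */
static void my_show_cookie(blk_qc_t cookie)
{
	if (!blk_qc_t_valid(cookie))
		return;
	pr_debug("hctx %u, tag %u (%s)\n",
		 blk_qc_t_to_queue_num(cookie),
		 blk_qc_t_to_tag(cookie),
		 blk_qc_t_is_internal(cookie) ? "scheduler" : "driver");
}
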
Ming Leid964f042017-06-06 23:22:00 +08001466static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1467 struct request *rq,
1468 blk_qc_t *cookie, bool may_sleep)
Shaohua Lif984df12015-05-08 10:51:32 -07001469{
Shaohua Lif984df12015-05-08 10:51:32 -07001470 struct request_queue *q = rq->q;
Shaohua Lif984df12015-05-08 10:51:32 -07001471 struct blk_mq_queue_data bd = {
1472 .rq = rq,
Omar Sandovald945a362017-04-05 12:01:36 -07001473 .last = true,
Shaohua Lif984df12015-05-08 10:51:32 -07001474 };
Jens Axboebd166ef2017-01-17 06:03:22 -07001475 blk_qc_t new_cookie;
Jens Axboef06345a2017-06-12 11:22:46 -06001476 blk_status_t ret;
Ming Leid964f042017-06-06 23:22:00 +08001477 bool run_queue = true;
1478
Ming Leif4560ff2017-06-18 14:24:27 -06001479 /* RCU or SRCU read lock is needed before checking quiesced flag */
1480 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
Ming Leid964f042017-06-06 23:22:00 +08001481 run_queue = false;
1482 goto insert;
1483 }
Shaohua Lif984df12015-05-08 10:51:32 -07001484
Jens Axboebd166ef2017-01-17 06:03:22 -07001485 if (q->elevator)
Bart Van Assche2253efc2016-10-28 17:20:02 -07001486 goto insert;
1487
Ming Leid964f042017-06-06 23:22:00 +08001488 if (!blk_mq_get_driver_tag(rq, NULL, false))
Jens Axboebd166ef2017-01-17 06:03:22 -07001489 goto insert;
1490
1491 new_cookie = request_to_qc_t(hctx, rq);
1492
Shaohua Lif984df12015-05-08 10:51:32 -07001493 /*
1494 * If the driver accepted the request, we are done. On a fatal error,
1495 * end the request. Any other error (busy) means we requeue it and
1496 * fall back to inserting it, as we previously would have done.
1497 */
1498 ret = q->mq_ops->queue_rq(hctx, &bd);
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001499 switch (ret) {
1500 case BLK_STS_OK:
Jens Axboe7b371632015-11-05 10:41:40 -07001501 *cookie = new_cookie;
Bart Van Assche2253efc2016-10-28 17:20:02 -07001502 return;
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001503 case BLK_STS_RESOURCE:
1504 __blk_mq_requeue_request(rq);
1505 goto insert;
1506 default:
Jens Axboe7b371632015-11-05 10:41:40 -07001507 *cookie = BLK_QC_T_NONE;
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001508 blk_mq_end_request(rq, ret);
Bart Van Assche2253efc2016-10-28 17:20:02 -07001509 return;
Jens Axboe7b371632015-11-05 10:41:40 -07001510 }
1511
Bart Van Assche2253efc2016-10-28 17:20:02 -07001512insert:
Ming Leid964f042017-06-06 23:22:00 +08001513 blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
Shaohua Lif984df12015-05-08 10:51:32 -07001514}
1515
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001516static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1517 struct request *rq, blk_qc_t *cookie)
1518{
1519 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1520 rcu_read_lock();
Ming Leid964f042017-06-06 23:22:00 +08001521 __blk_mq_try_issue_directly(hctx, rq, cookie, false);
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001522 rcu_read_unlock();
1523 } else {
Jens Axboebf4907c2017-03-30 12:30:39 -06001524 unsigned int srcu_idx;
1525
1526 might_sleep();
1527
Bart Van Assche07319672017-06-20 11:15:38 -07001528 srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
Ming Leid964f042017-06-06 23:22:00 +08001529 __blk_mq_try_issue_directly(hctx, rq, cookie, true);
Bart Van Assche07319672017-06-20 11:15:38 -07001530 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001531 }
1532}
1533
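/*
 * Note on the locking above: a driver that may sleep in ->queue_rq() sets
 * BLK_MQ_F_BLOCKING, and its hctx then carries an SRCU domain
 * (hctx->queue_rq_srcu) instead of relying on plain RCU.  Either way,
 * blk_mq_quiesce_queue() can wait for in-flight ->queue_rq() calls by
 * synchronizing the matching RCU flavour.
 */
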
Jens Axboedece1632015-11-05 10:41:16 -07001534static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06001535{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001536 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07001537 const int is_flush_fua = op_is_flush(bio->bi_opf);
Jens Axboe5a797e02017-01-26 12:22:11 -07001538 struct blk_mq_alloc_data data = { .flags = 0 };
Jens Axboe07068d52014-05-22 10:40:51 -06001539 struct request *rq;
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001540 unsigned int request_count = 0;
Shaohua Lif984df12015-05-08 10:51:32 -07001541 struct blk_plug *plug;
Shaohua Li5b3f3412015-05-08 10:51:33 -07001542 struct request *same_queue_rq = NULL;
Jens Axboe7b371632015-11-05 10:41:40 -07001543 blk_qc_t cookie;
Jens Axboe87760e52016-11-09 12:38:14 -07001544 unsigned int wb_acct;
Jens Axboe07068d52014-05-22 10:40:51 -06001545
1546 blk_queue_bounce(q, &bio);
1547
NeilBrownaf67c312017-06-18 14:38:57 +10001548 blk_queue_split(q, &bio);
Wen Xiongf36ea502017-05-10 08:54:11 -05001549
Jens Axboe07068d52014-05-22 10:40:51 -06001550 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001551 bio_io_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001552 return BLK_QC_T_NONE;
Jens Axboe07068d52014-05-22 10:40:51 -06001553 }
1554
Omar Sandoval87c279e2016-06-01 22:18:48 -07001555 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1556 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1557 return BLK_QC_T_NONE;
Shaohua Lif984df12015-05-08 10:51:32 -07001558
Jens Axboebd166ef2017-01-17 06:03:22 -07001559 if (blk_mq_sched_bio_merge(q, bio))
1560 return BLK_QC_T_NONE;
1561
Jens Axboe87760e52016-11-09 12:38:14 -07001562 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1563
Jens Axboebd166ef2017-01-17 06:03:22 -07001564 trace_block_getrq(q, bio, bio->bi_opf);
1565
Christoph Hellwigd2c0d382017-06-16 18:15:19 +02001566 rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
Jens Axboe87760e52016-11-09 12:38:14 -07001567 if (unlikely(!rq)) {
1568 __wbt_done(q->rq_wb, wb_acct);
Goldwyn Rodrigues03a07c92017-06-20 07:05:46 -05001569 if (bio->bi_opf & REQ_NOWAIT)
1570 bio_wouldblock_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001571 return BLK_QC_T_NONE;
Jens Axboe87760e52016-11-09 12:38:14 -07001572 }
1573
1574 wbt_track(&rq->issue_stat, wb_acct);
Jens Axboe07068d52014-05-22 10:40:51 -06001575
Jens Axboefd2d3322017-01-12 10:04:45 -07001576 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06001577
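	/*
	 * From here the request takes one of several paths:
	 *  - flush/FUA requests bypass plugging and go through the flush
	 *    machinery (or the scheduler, if one is attached);
	 *  - on single-hw-queue devices a plug batches requests and is
	 *    flushed once it grows large enough;
	 *  - on multi-queue devices with a plug, limited plugging is done
	 *    and the previous request on the list may be issued directly;
	 *  - sync requests on multi-queue devices are issued directly
	 *    (falling back to insertion when a scheduler is attached);
	 *  - everything else is inserted into the sw queue or the scheduler
	 *    and the hardware queue is kicked.
	 */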
Shaohua Lif984df12015-05-08 10:51:32 -07001578 plug = current->plug;
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001579 if (unlikely(is_flush_fua)) {
Shaohua Lif984df12015-05-08 10:51:32 -07001580 blk_mq_put_ctx(data.ctx);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001581 blk_mq_bio_to_request(rq, bio);
1582 if (q->elevator) {
1583 blk_mq_sched_insert_request(rq, false, true, true,
1584 true);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001585 } else {
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001586 blk_insert_flush(rq);
1587 blk_mq_run_hw_queue(data.hctx, true);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001588 }
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001589 } else if (plug && q->nr_hw_queues == 1) {
Shaohua Li600271d2016-11-03 17:03:54 -07001590 struct request *last = NULL;
1591
Jens Axboeb00c53e2017-04-20 16:40:36 -06001592 blk_mq_put_ctx(data.ctx);
Jeff Moyere6c44382015-05-08 10:51:30 -07001593 blk_mq_bio_to_request(rq, bio);
Ming Lei0a6219a2016-11-16 18:07:05 +08001594
1595 /*
1596 * @request_count may become stale because we may have been
1597 * scheduled out, so check the list again.
1598 */
1599 if (list_empty(&plug->mq_list))
1600 request_count = 0;
Christoph Hellwig254d2592017-03-22 15:01:50 -04001601 else if (blk_queue_nomerges(q))
1602 request_count = blk_plug_queued_count(q);
1603
Ming Lei676d0602015-10-20 23:13:56 +08001604 if (!request_count)
Jeff Moyere6c44382015-05-08 10:51:30 -07001605 trace_block_plug(q);
Shaohua Li600271d2016-11-03 17:03:54 -07001606 else
1607 last = list_entry_rq(plug->mq_list.prev);
Jens Axboeb094f892015-11-20 20:29:45 -07001608
Shaohua Li600271d2016-11-03 17:03:54 -07001609 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1610 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
Jeff Moyere6c44382015-05-08 10:51:30 -07001611 blk_flush_plug_list(plug, false);
1612 trace_block_plug(q);
Jens Axboe320ae512013-10-24 09:20:05 +01001613 }
Jens Axboeb094f892015-11-20 20:29:45 -07001614
Jeff Moyere6c44382015-05-08 10:51:30 -07001615 list_add_tail(&rq->queuelist, &plug->mq_list);
Christoph Hellwig22997222017-03-22 15:01:52 -04001616 } else if (plug && !blk_queue_nomerges(q)) {
Jens Axboe320ae512013-10-24 09:20:05 +01001617 blk_mq_bio_to_request(rq, bio);
Jens Axboe320ae512013-10-24 09:20:05 +01001618
Jens Axboe320ae512013-10-24 09:20:05 +01001619 /*
1620 * We do limited plugging. If the bio can be merged, do that.
1621 * Otherwise the existing request in the plug list will be
1622 * issued. So the plug list will have one request at most.
Christoph Hellwig22997222017-03-22 15:01:52 -04001623 * The plug list might get flushed before this. If that happens,
1624 * the plug list is empty, and same_queue_rq is invalid.
Jens Axboe320ae512013-10-24 09:20:05 +01001625 */
Christoph Hellwig22997222017-03-22 15:01:52 -04001626 if (list_empty(&plug->mq_list))
1627 same_queue_rq = NULL;
1628 if (same_queue_rq)
1629 list_del_init(&same_queue_rq->queuelist);
1630 list_add_tail(&rq->queuelist, &plug->mq_list);
1631
Jens Axboebf4907c2017-03-30 12:30:39 -06001632 blk_mq_put_ctx(data.ctx);
1633
Ming Leidad7a3b2017-06-06 23:21:59 +08001634 if (same_queue_rq) {
1635 data.hctx = blk_mq_map_queue(q,
1636 same_queue_rq->mq_ctx->cpu);
Christoph Hellwig22997222017-03-22 15:01:52 -04001637 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1638 &cookie);
Ming Leidad7a3b2017-06-06 23:21:59 +08001639 }
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001640 } else if (q->nr_hw_queues > 1 && is_sync) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001641 blk_mq_put_ctx(data.ctx);
1642 blk_mq_bio_to_request(rq, bio);
Christoph Hellwig22997222017-03-22 15:01:52 -04001643 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001644 } else if (q->elevator) {
Jens Axboeb00c53e2017-04-20 16:40:36 -06001645 blk_mq_put_ctx(data.ctx);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001646 blk_mq_bio_to_request(rq, bio);
1647 blk_mq_sched_insert_request(rq, false, true, true, true);
Ming Leiab42f352017-05-26 19:53:19 +08001648 } else {
Jens Axboeb00c53e2017-04-20 16:40:36 -06001649 blk_mq_put_ctx(data.ctx);
Ming Leiab42f352017-05-26 19:53:19 +08001650 blk_mq_bio_to_request(rq, bio);
1651 blk_mq_queue_io(data.hctx, data.ctx, rq);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001652 blk_mq_run_hw_queue(data.hctx, true);
Ming Leiab42f352017-05-26 19:53:19 +08001653 }
Jens Axboe320ae512013-10-24 09:20:05 +01001654
Jens Axboe7b371632015-11-05 10:41:40 -07001655 return cookie;
Jens Axboe320ae512013-10-24 09:20:05 +01001656}
1657
Jens Axboecc71a6f2017-01-11 14:29:56 -07001658void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1659 unsigned int hctx_idx)
Jens Axboe320ae512013-10-24 09:20:05 +01001660{
1661 struct page *page;
1662
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001663 if (tags->rqs && set->ops->exit_request) {
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001664 int i;
1665
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001666 for (i = 0; i < tags->nr_tags; i++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001667 struct request *rq = tags->static_rqs[i];
1668
1669 if (!rq)
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001670 continue;
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001671 set->ops->exit_request(set, rq, hctx_idx);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001672 tags->static_rqs[i] = NULL;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001673 }
1674 }
1675
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001676 while (!list_empty(&tags->page_list)) {
1677 page = list_first_entry(&tags->page_list, struct page, lru);
Dave Hansen67534712014-01-08 20:17:46 -07001678 list_del_init(&page->lru);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001679 /*
1680 * Remove the kmemleak object previously allocated in
1681 * blk_mq_alloc_rqs().
1682 */
1683 kmemleak_free(page_address(page));
Jens Axboe320ae512013-10-24 09:20:05 +01001684 __free_pages(page, page->private);
1685 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001686}
Jens Axboe320ae512013-10-24 09:20:05 +01001687
Jens Axboecc71a6f2017-01-11 14:29:56 -07001688void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1689{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001690 kfree(tags->rqs);
Jens Axboecc71a6f2017-01-11 14:29:56 -07001691 tags->rqs = NULL;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001692 kfree(tags->static_rqs);
1693 tags->static_rqs = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001694
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001695 blk_mq_free_tags(tags);
Jens Axboe320ae512013-10-24 09:20:05 +01001696}
1697
Jens Axboecc71a6f2017-01-11 14:29:56 -07001698struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1699 unsigned int hctx_idx,
1700 unsigned int nr_tags,
1701 unsigned int reserved_tags)
Jens Axboe320ae512013-10-24 09:20:05 +01001702{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001703 struct blk_mq_tags *tags;
Shaohua Li59f082e2017-02-01 09:53:14 -08001704 int node;
Jens Axboe320ae512013-10-24 09:20:05 +01001705
Shaohua Li59f082e2017-02-01 09:53:14 -08001706 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1707 if (node == NUMA_NO_NODE)
1708 node = set->numa_node;
1709
1710 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
Shaohua Li24391c02015-01-23 14:18:00 -07001711 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001712 if (!tags)
1713 return NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001714
Jens Axboecc71a6f2017-01-11 14:29:56 -07001715 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001716 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001717 node);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001718 if (!tags->rqs) {
1719 blk_mq_free_tags(tags);
1720 return NULL;
1721 }
Jens Axboe320ae512013-10-24 09:20:05 +01001722
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001723 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1724 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001725 node);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001726 if (!tags->static_rqs) {
1727 kfree(tags->rqs);
1728 blk_mq_free_tags(tags);
1729 return NULL;
1730 }
1731
Jens Axboecc71a6f2017-01-11 14:29:56 -07001732 return tags;
1733}
1734
1735static size_t order_to_size(unsigned int order)
1736{
1737 return (size_t)PAGE_SIZE << order;
1738}
1739
1740int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1741 unsigned int hctx_idx, unsigned int depth)
1742{
1743 unsigned int i, j, entries_per_page, max_order = 4;
1744 size_t rq_size, left;
Shaohua Li59f082e2017-02-01 09:53:14 -08001745 int node;
1746
1747 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1748 if (node == NUMA_NO_NODE)
1749 node = set->numa_node;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001750
1751 INIT_LIST_HEAD(&tags->page_list);
1752
Jens Axboe320ae512013-10-24 09:20:05 +01001753 /*
1754 * rq_size is the size of the request plus driver payload, rounded
1755 * to the cacheline size
1756 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001757 rq_size = round_up(sizeof(struct request) + set->cmd_size,
Jens Axboe320ae512013-10-24 09:20:05 +01001758 cache_line_size());
Jens Axboecc71a6f2017-01-11 14:29:56 -07001759 left = rq_size * depth;
Jens Axboe320ae512013-10-24 09:20:05 +01001760
Jens Axboecc71a6f2017-01-11 14:29:56 -07001761 for (i = 0; i < depth; ) {
Jens Axboe320ae512013-10-24 09:20:05 +01001762 int this_order = max_order;
1763 struct page *page;
1764 int to_do;
1765 void *p;
1766
Bartlomiej Zolnierkiewiczb3a834b2016-05-16 09:54:47 -06001767 while (this_order && left < order_to_size(this_order - 1))
Jens Axboe320ae512013-10-24 09:20:05 +01001768 this_order--;
1769
1770 do {
Shaohua Li59f082e2017-02-01 09:53:14 -08001771 page = alloc_pages_node(node,
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001772 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
Jens Axboea5164402014-09-10 09:02:03 -06001773 this_order);
Jens Axboe320ae512013-10-24 09:20:05 +01001774 if (page)
1775 break;
1776 if (!this_order--)
1777 break;
1778 if (order_to_size(this_order) < rq_size)
1779 break;
1780 } while (1);
1781
1782 if (!page)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001783 goto fail;
Jens Axboe320ae512013-10-24 09:20:05 +01001784
1785 page->private = this_order;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001786 list_add_tail(&page->lru, &tags->page_list);
Jens Axboe320ae512013-10-24 09:20:05 +01001787
1788 p = page_address(page);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001789 /*
1790 * Allow kmemleak to scan these pages as they contain pointers
1791 * to additional allocations such as those made via ops->init_request().
1792 */
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001793 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
Jens Axboe320ae512013-10-24 09:20:05 +01001794 entries_per_page = order_to_size(this_order) / rq_size;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001795 to_do = min(entries_per_page, depth - i);
Jens Axboe320ae512013-10-24 09:20:05 +01001796 left -= to_do * rq_size;
1797 for (j = 0; j < to_do; j++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001798 struct request *rq = p;
1799
1800 tags->static_rqs[i] = rq;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001801 if (set->ops->init_request) {
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001802 if (set->ops->init_request(set, rq, hctx_idx,
Shaohua Li59f082e2017-02-01 09:53:14 -08001803 node)) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001804 tags->static_rqs[i] = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001805 goto fail;
Jens Axboea5164402014-09-10 09:02:03 -06001806 }
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001807 }
1808
Jens Axboe320ae512013-10-24 09:20:05 +01001809 p += rq_size;
1810 i++;
1811 }
1812 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001813 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01001814
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001815fail:
Jens Axboecc71a6f2017-01-11 14:29:56 -07001816 blk_mq_free_rqs(set, tags, hctx_idx);
1817 return -ENOMEM;
Jens Axboe320ae512013-10-24 09:20:05 +01001818}
1819
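/*
 * Worked example for the sizing above (illustrative figures): with 4 KiB
 * pages and a cmd_size that makes rq_size round up to 512 bytes, a depth
 * of 256 needs left = 128 KiB.  The loop first tries order-4 blocks
 * (64 KiB, i.e. 128 requests per block) and falls back to smaller orders
 * when the page allocator cannot satisfy them, until all 256 static
 * requests have been carved out and stored in tags->static_rqs[].
 */
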
Jens Axboee57690f2016-08-24 15:34:35 -06001820/*
1821 * 'cpu' is going away. splice any existing rq_list entries from this
1822 * software queue to the hw queue dispatch list, and ensure that it
1823 * gets run.
1824 */
Thomas Gleixner9467f852016-09-22 08:05:17 -06001825static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
Jens Axboe484b4062014-05-21 14:01:15 -06001826{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001827 struct blk_mq_hw_ctx *hctx;
Jens Axboe484b4062014-05-21 14:01:15 -06001828 struct blk_mq_ctx *ctx;
1829 LIST_HEAD(tmp);
1830
Thomas Gleixner9467f852016-09-22 08:05:17 -06001831 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
Jens Axboee57690f2016-08-24 15:34:35 -06001832 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
Jens Axboe484b4062014-05-21 14:01:15 -06001833
1834 spin_lock(&ctx->lock);
1835 if (!list_empty(&ctx->rq_list)) {
1836 list_splice_init(&ctx->rq_list, &tmp);
1837 blk_mq_hctx_clear_pending(hctx, ctx);
1838 }
1839 spin_unlock(&ctx->lock);
1840
1841 if (list_empty(&tmp))
Thomas Gleixner9467f852016-09-22 08:05:17 -06001842 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001843
Jens Axboee57690f2016-08-24 15:34:35 -06001844 spin_lock(&hctx->lock);
1845 list_splice_tail_init(&tmp, &hctx->dispatch);
1846 spin_unlock(&hctx->lock);
Jens Axboe484b4062014-05-21 14:01:15 -06001847
1848 blk_mq_run_hw_queue(hctx, true);
Thomas Gleixner9467f852016-09-22 08:05:17 -06001849 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001850}
1851
Thomas Gleixner9467f852016-09-22 08:05:17 -06001852static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
Jens Axboe484b4062014-05-21 14:01:15 -06001853{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001854 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1855 &hctx->cpuhp_dead);
Jens Axboe484b4062014-05-21 14:01:15 -06001856}
1857
Ming Leic3b4afc2015-06-04 22:25:04 +08001858/* hctx->ctxs will be freed in queue's release handler */
Ming Lei08e98fc2014-09-25 23:23:38 +08001859static void blk_mq_exit_hctx(struct request_queue *q,
1860 struct blk_mq_tag_set *set,
1861 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1862{
Omar Sandoval9c1051a2017-05-04 08:17:21 -06001863 blk_mq_debugfs_unregister_hctx(hctx);
1864
Ming Lei08e98fc2014-09-25 23:23:38 +08001865 blk_mq_tag_idle(hctx);
1866
Ming Leif70ced02014-09-25 23:23:47 +08001867 if (set->ops->exit_request)
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001868 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
Ming Leif70ced02014-09-25 23:23:47 +08001869
Omar Sandoval93252632017-04-05 12:01:31 -07001870 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1871
Ming Lei08e98fc2014-09-25 23:23:38 +08001872 if (set->ops->exit_hctx)
1873 set->ops->exit_hctx(hctx, hctx_idx);
1874
Bart Van Assche6a83e742016-11-02 10:09:51 -06001875 if (hctx->flags & BLK_MQ_F_BLOCKING)
Bart Van Assche07319672017-06-20 11:15:38 -07001876 cleanup_srcu_struct(hctx->queue_rq_srcu);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001877
Thomas Gleixner9467f852016-09-22 08:05:17 -06001878 blk_mq_remove_cpuhp(hctx);
Ming Leif70ced02014-09-25 23:23:47 +08001879 blk_free_flush_queue(hctx->fq);
Omar Sandoval88459642016-09-17 08:38:44 -06001880 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08001881}
1882
Ming Lei624dbe42014-05-27 23:35:13 +08001883static void blk_mq_exit_hw_queues(struct request_queue *q,
1884 struct blk_mq_tag_set *set, int nr_queue)
1885{
1886 struct blk_mq_hw_ctx *hctx;
1887 unsigned int i;
1888
1889 queue_for_each_hw_ctx(q, hctx, i) {
1890 if (i == nr_queue)
1891 break;
Ming Lei08e98fc2014-09-25 23:23:38 +08001892 blk_mq_exit_hctx(q, set, hctx, i);
Ming Lei624dbe42014-05-27 23:35:13 +08001893 }
Ming Lei624dbe42014-05-27 23:35:13 +08001894}
1895
Ming Lei08e98fc2014-09-25 23:23:38 +08001896static int blk_mq_init_hctx(struct request_queue *q,
1897 struct blk_mq_tag_set *set,
1898 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1899{
1900 int node;
1901
1902 node = hctx->numa_node;
1903 if (node == NUMA_NO_NODE)
1904 node = hctx->numa_node = set->numa_node;
1905
Jens Axboe9f993732017-04-10 09:54:54 -06001906 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
Ming Lei08e98fc2014-09-25 23:23:38 +08001907 spin_lock_init(&hctx->lock);
1908 INIT_LIST_HEAD(&hctx->dispatch);
1909 hctx->queue = q;
Jeff Moyer2404e602015-11-03 10:40:06 -05001910 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
Ming Lei08e98fc2014-09-25 23:23:38 +08001911
Thomas Gleixner9467f852016-09-22 08:05:17 -06001912 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
Ming Lei08e98fc2014-09-25 23:23:38 +08001913
1914 hctx->tags = set->tags[hctx_idx];
1915
1916 /*
1917 * Allocate space for all possible cpus to avoid allocation at
1918 * runtime
1919 */
1920 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1921 GFP_KERNEL, node);
1922 if (!hctx->ctxs)
1923 goto unregister_cpu_notifier;
1924
Omar Sandoval88459642016-09-17 08:38:44 -06001925 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1926 node))
Ming Lei08e98fc2014-09-25 23:23:38 +08001927 goto free_ctxs;
1928
1929 hctx->nr_ctx = 0;
1930
1931 if (set->ops->init_hctx &&
1932 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1933 goto free_bitmap;
1934
Omar Sandoval93252632017-04-05 12:01:31 -07001935 if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
1936 goto exit_hctx;
1937
Ming Leif70ced02014-09-25 23:23:47 +08001938 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1939 if (!hctx->fq)
Omar Sandoval93252632017-04-05 12:01:31 -07001940 goto sched_exit_hctx;
Ming Leif70ced02014-09-25 23:23:47 +08001941
1942 if (set->ops->init_request &&
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001943 set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
1944 node))
Ming Leif70ced02014-09-25 23:23:47 +08001945 goto free_fq;
1946
Bart Van Assche6a83e742016-11-02 10:09:51 -06001947 if (hctx->flags & BLK_MQ_F_BLOCKING)
Bart Van Assche07319672017-06-20 11:15:38 -07001948 init_srcu_struct(hctx->queue_rq_srcu);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001949
Omar Sandoval9c1051a2017-05-04 08:17:21 -06001950 blk_mq_debugfs_register_hctx(q, hctx);
1951
Ming Lei08e98fc2014-09-25 23:23:38 +08001952 return 0;
1953
Ming Leif70ced02014-09-25 23:23:47 +08001954 free_fq:
1955 kfree(hctx->fq);
Omar Sandoval93252632017-04-05 12:01:31 -07001956 sched_exit_hctx:
1957 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
Ming Leif70ced02014-09-25 23:23:47 +08001958 exit_hctx:
1959 if (set->ops->exit_hctx)
1960 set->ops->exit_hctx(hctx, hctx_idx);
Ming Lei08e98fc2014-09-25 23:23:38 +08001961 free_bitmap:
Omar Sandoval88459642016-09-17 08:38:44 -06001962 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08001963 free_ctxs:
1964 kfree(hctx->ctxs);
1965 unregister_cpu_notifier:
Thomas Gleixner9467f852016-09-22 08:05:17 -06001966 blk_mq_remove_cpuhp(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08001967 return -1;
1968}
1969
Jens Axboe320ae512013-10-24 09:20:05 +01001970static void blk_mq_init_cpu_queues(struct request_queue *q,
1971 unsigned int nr_hw_queues)
1972{
1973 unsigned int i;
1974
1975 for_each_possible_cpu(i) {
1976 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1977 struct blk_mq_hw_ctx *hctx;
1978
Jens Axboe320ae512013-10-24 09:20:05 +01001979 __ctx->cpu = i;
1980 spin_lock_init(&__ctx->lock);
1981 INIT_LIST_HEAD(&__ctx->rq_list);
1982 __ctx->queue = q;
1983
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02001984		/* If the cpu isn't present, the cpu is mapped to the first hctx */
1985 if (!cpu_present(i))
Jens Axboe320ae512013-10-24 09:20:05 +01001986 continue;
1987
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02001988 hctx = blk_mq_map_queue(q, i);
Jens Axboee4043dc2014-04-09 10:18:23 -06001989
Jens Axboe320ae512013-10-24 09:20:05 +01001990 /*
1991 * Set local node, IFF we have more than one hw queue. If
1992 * not, we remain on the home node of the device
1993 */
1994 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
Raghavendra K Tbffed452015-12-02 16:59:05 +05301995 hctx->numa_node = local_memory_node(cpu_to_node(i));
Jens Axboe320ae512013-10-24 09:20:05 +01001996 }
1997}
1998
Jens Axboecc71a6f2017-01-11 14:29:56 -07001999static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2000{
2001 int ret = 0;
2002
2003 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2004 set->queue_depth, set->reserved_tags);
2005 if (!set->tags[hctx_idx])
2006 return false;
2007
2008 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2009 set->queue_depth);
2010 if (!ret)
2011 return true;
2012
2013 blk_mq_free_rq_map(set->tags[hctx_idx]);
2014 set->tags[hctx_idx] = NULL;
2015 return false;
2016}
2017
2018static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2019 unsigned int hctx_idx)
2020{
Jens Axboebd166ef2017-01-17 06:03:22 -07002021 if (set->tags[hctx_idx]) {
2022 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2023 blk_mq_free_rq_map(set->tags[hctx_idx]);
2024 set->tags[hctx_idx] = NULL;
2025 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002026}
2027
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002028static void blk_mq_map_swqueue(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01002029{
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002030 unsigned int i, hctx_idx;
Jens Axboe320ae512013-10-24 09:20:05 +01002031 struct blk_mq_hw_ctx *hctx;
2032 struct blk_mq_ctx *ctx;
Ming Lei2a34c082015-04-21 10:00:20 +08002033 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002034
Akinobu Mita60de0742015-09-27 02:09:25 +09002035 /*
2036	 * Prevent others from reading an incomplete hctx->cpumask through sysfs
2037 */
2038 mutex_lock(&q->sysfs_lock);
2039
Jens Axboe320ae512013-10-24 09:20:05 +01002040 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboee4043dc2014-04-09 10:18:23 -06002041 cpumask_clear(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002042 hctx->nr_ctx = 0;
2043 }
2044
2045 /*
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002046 * Map software to hardware queues.
2047 *
2048	 * If the cpu isn't present, the cpu is mapped to the first hctx.
Jens Axboe320ae512013-10-24 09:20:05 +01002049 */
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002050 for_each_present_cpu(i) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002051 hctx_idx = q->mq_map[i];
2052 /* unmapped hw queue can be remapped after CPU topo changed */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002053 if (!set->tags[hctx_idx] &&
2054 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002055 /*
2056			 * If tags initialization fails for some hctx,
2057			 * that hctx won't be brought online. In this
2058			 * case, remap the current ctx to hctx[0], which
2059			 * is guaranteed to always have tags allocated.
2060 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002061 q->mq_map[i] = 0;
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002062 }
2063
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002064 ctx = per_cpu_ptr(q->queue_ctx, i);
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002065 hctx = blk_mq_map_queue(q, i);
Keith Busch868f2f02015-12-17 17:08:14 -07002066
Jens Axboee4043dc2014-04-09 10:18:23 -06002067 cpumask_set_cpu(i, hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002068 ctx->index_hw = hctx->nr_ctx;
2069 hctx->ctxs[hctx->nr_ctx++] = ctx;
2070 }
Jens Axboe506e9312014-05-07 10:26:44 -06002071
Akinobu Mita60de0742015-09-27 02:09:25 +09002072 mutex_unlock(&q->sysfs_lock);
2073
Jens Axboe506e9312014-05-07 10:26:44 -06002074 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe484b4062014-05-21 14:01:15 -06002075 /*
Jens Axboea68aafa2014-08-15 13:19:15 -06002076 * If no software queues are mapped to this hardware queue,
2077 * disable it and free the request entries.
Jens Axboe484b4062014-05-21 14:01:15 -06002078 */
2079 if (!hctx->nr_ctx) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002080 /* Never unmap queue 0. We need it as a
2081			 * fallback in case allocation fails during
2082			 * a new remap.
2083 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002084 if (i && set->tags[i])
2085 blk_mq_free_map_and_requests(set, i);
2086
Ming Lei2a34c082015-04-21 10:00:20 +08002087 hctx->tags = NULL;
Jens Axboe484b4062014-05-21 14:01:15 -06002088 continue;
2089 }
2090
Ming Lei2a34c082015-04-21 10:00:20 +08002091 hctx->tags = set->tags[i];
2092 WARN_ON(!hctx->tags);
2093
Jens Axboe484b4062014-05-21 14:01:15 -06002094 /*
Chong Yuan889fa312015-04-15 11:39:29 -06002095 * Set the map size to the number of mapped software queues.
2096 * This is more accurate and more efficient than looping
2097 * over all possibly mapped software queues.
2098 */
Omar Sandoval88459642016-09-17 08:38:44 -06002099 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
Chong Yuan889fa312015-04-15 11:39:29 -06002100
2101 /*
Jens Axboe484b4062014-05-21 14:01:15 -06002102 * Initialize batch roundrobin counts
2103 */
Jens Axboe506e9312014-05-07 10:26:44 -06002104 hctx->next_cpu = cpumask_first(hctx->cpumask);
2105 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2106 }
Jens Axboe320ae512013-10-24 09:20:05 +01002107}
2108
Jens Axboe8e8320c2017-06-20 17:56:13 -06002109/*
2110 * Caller needs to ensure that we're either frozen/quiesced, or that
2111 * the queue isn't live yet.
2112 */
Jeff Moyer2404e602015-11-03 10:40:06 -05002113static void queue_set_hctx_shared(struct request_queue *q, bool shared)
Jens Axboe0d2602c2014-05-13 15:10:52 -06002114{
2115 struct blk_mq_hw_ctx *hctx;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002116 int i;
2117
Jeff Moyer2404e602015-11-03 10:40:06 -05002118 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe8e8320c2017-06-20 17:56:13 -06002119 if (shared) {
2120 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2121 atomic_inc(&q->shared_hctx_restart);
Jeff Moyer2404e602015-11-03 10:40:06 -05002122 hctx->flags |= BLK_MQ_F_TAG_SHARED;
Jens Axboe8e8320c2017-06-20 17:56:13 -06002123 } else {
2124 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2125 atomic_dec(&q->shared_hctx_restart);
Jeff Moyer2404e602015-11-03 10:40:06 -05002126 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
Jens Axboe8e8320c2017-06-20 17:56:13 -06002127 }
Jeff Moyer2404e602015-11-03 10:40:06 -05002128 }
2129}
2130
Jens Axboe8e8320c2017-06-20 17:56:13 -06002131static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2132 bool shared)
Jeff Moyer2404e602015-11-03 10:40:06 -05002133{
2134 struct request_queue *q;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002135
Bart Van Assche705cda92017-04-07 11:16:49 -07002136 lockdep_assert_held(&set->tag_list_lock);
2137
Jens Axboe0d2602c2014-05-13 15:10:52 -06002138 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2139 blk_mq_freeze_queue(q);
Jeff Moyer2404e602015-11-03 10:40:06 -05002140 queue_set_hctx_shared(q, shared);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002141 blk_mq_unfreeze_queue(q);
2142 }
2143}
2144
2145static void blk_mq_del_queue_tag_set(struct request_queue *q)
2146{
2147 struct blk_mq_tag_set *set = q->tag_set;
2148
Jens Axboe0d2602c2014-05-13 15:10:52 -06002149 mutex_lock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002150 list_del_rcu(&q->tag_set_list);
2151 INIT_LIST_HEAD(&q->tag_set_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002152 if (list_is_singular(&set->tag_list)) {
2153 /* just transitioned to unshared */
2154 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2155 /* update existing queue */
2156 blk_mq_update_tag_set_depth(set, false);
2157 }
Jens Axboe0d2602c2014-05-13 15:10:52 -06002158 mutex_unlock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002159
2160 synchronize_rcu();
Jens Axboe0d2602c2014-05-13 15:10:52 -06002161}
2162
2163static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2164 struct request_queue *q)
2165{
2166 q->tag_set = set;
2167
2168 mutex_lock(&set->tag_list_lock);
Jeff Moyer2404e602015-11-03 10:40:06 -05002169
2170 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2171 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2172 set->flags |= BLK_MQ_F_TAG_SHARED;
2173 /* update existing queue */
2174 blk_mq_update_tag_set_depth(set, true);
2175 }
2176 if (set->flags & BLK_MQ_F_TAG_SHARED)
2177 queue_set_hctx_shared(q, true);
Bart Van Assche705cda92017-04-07 11:16:49 -07002178 list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002179
Jens Axboe0d2602c2014-05-13 15:10:52 -06002180 mutex_unlock(&set->tag_list_lock);
2181}
2182
Ming Leie09aae72015-01-29 20:17:27 +08002183/*
2184 * This is the actual release handler for mq, but we run it from the
2185 * request queue's release handler to avoid use-after-free headaches.
2186 * q->mq_kobj shouldn't have been introduced, but we can't group the
2187 * ctx/hctx kobjects without it.
2188 */
2189void blk_mq_release(struct request_queue *q)
2190{
2191 struct blk_mq_hw_ctx *hctx;
2192 unsigned int i;
2193
2194 /* hctx kobj stays in hctx */
Ming Leic3b4afc2015-06-04 22:25:04 +08002195 queue_for_each_hw_ctx(q, hctx, i) {
2196 if (!hctx)
2197 continue;
Ming Lei6c8b2322017-02-22 18:14:01 +08002198 kobject_put(&hctx->kobj);
Ming Leic3b4afc2015-06-04 22:25:04 +08002199 }
Ming Leie09aae72015-01-29 20:17:27 +08002200
Akinobu Mitaa723bab2015-09-27 02:09:21 +09002201 q->mq_map = NULL;
2202
Ming Leie09aae72015-01-29 20:17:27 +08002203 kfree(q->queue_hw_ctx);
2204
Ming Lei7ea5fe32017-02-22 18:14:00 +08002205 /*
2206	 * Release .mq_kobj and the sw queues' kobjects now because
2207	 * both share their lifetime with the request queue.
2208 */
2209 blk_mq_sysfs_deinit(q);
2210
Ming Leie09aae72015-01-29 20:17:27 +08002211 free_percpu(q->queue_ctx);
2212}
2213
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002214struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01002215{
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002216 struct request_queue *uninit_q, *q;
2217
2218 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2219 if (!uninit_q)
2220 return ERR_PTR(-ENOMEM);
2221
2222 q = blk_mq_init_allocated_queue(set, uninit_q);
2223 if (IS_ERR(q))
2224 blk_cleanup_queue(uninit_q);
2225
2226 return q;
2227}
2228EXPORT_SYMBOL(blk_mq_init_queue);
2229
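/*
 * Illustrative sketch, not part of blk-mq itself: the usual driver-side
 * sequence is to fill in a blk_mq_tag_set, register it, and then create a
 * request queue on top of it.  my_mq_ops and struct my_cmd are
 * hypothetical; the blk_mq_* calls and the tag_set fields are the real
 * API.
 */
static int my_driver_init_queue(struct blk_mq_tag_set *set,
				struct request_queue **q)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = &my_mq_ops;			/* must provide ->queue_rq() */
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct my_cmd);	/* per-request driver data */
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	*q = blk_mq_init_queue(set);
	if (IS_ERR(*q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(*q);
	}
	return 0;
}
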
Bart Van Assche07319672017-06-20 11:15:38 -07002230static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2231{
2232 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2233
2234 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
2235 __alignof__(struct blk_mq_hw_ctx)) !=
2236 sizeof(struct blk_mq_hw_ctx));
2237
2238 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2239 hw_ctx_size += sizeof(struct srcu_struct);
2240
2241 return hw_ctx_size;
2242}
2243
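/*
 * Note: queue_rq_srcu is laid out as a trailing zero-length member of
 * struct blk_mq_hw_ctx, so the srcu_struct storage is only allocated for
 * BLK_MQ_F_BLOCKING drivers; the BUILD_BUG_ON above guards the assumption
 * that it really is the last, suitably aligned member.
 */
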
Keith Busch868f2f02015-12-17 17:08:14 -07002244static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2245 struct request_queue *q)
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002246{
Keith Busch868f2f02015-12-17 17:08:14 -07002247 int i, j;
2248 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
Jens Axboe320ae512013-10-24 09:20:05 +01002249
Keith Busch868f2f02015-12-17 17:08:14 -07002250 blk_mq_sysfs_unregister(q);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002251 for (i = 0; i < set->nr_hw_queues; i++) {
Keith Busch868f2f02015-12-17 17:08:14 -07002252 int node;
Jens Axboef14bbe72014-05-27 12:06:53 -06002253
Keith Busch868f2f02015-12-17 17:08:14 -07002254 if (hctxs[i])
2255 continue;
2256
2257 node = blk_mq_hw_queue_to_node(q->mq_map, i);
Bart Van Assche07319672017-06-20 11:15:38 -07002258 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
Christoph Hellwigcdef54d2014-05-28 18:11:06 +02002259 GFP_KERNEL, node);
Jens Axboe320ae512013-10-24 09:20:05 +01002260 if (!hctxs[i])
Keith Busch868f2f02015-12-17 17:08:14 -07002261 break;
Jens Axboe320ae512013-10-24 09:20:05 +01002262
Jens Axboea86073e2014-10-13 15:41:54 -06002263 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
Keith Busch868f2f02015-12-17 17:08:14 -07002264 node)) {
2265 kfree(hctxs[i]);
2266 hctxs[i] = NULL;
2267 break;
2268 }
Jens Axboee4043dc2014-04-09 10:18:23 -06002269
Jens Axboe0d2602c2014-05-13 15:10:52 -06002270 atomic_set(&hctxs[i]->nr_active, 0);
Jens Axboef14bbe72014-05-27 12:06:53 -06002271 hctxs[i]->numa_node = node;
Jens Axboe320ae512013-10-24 09:20:05 +01002272 hctxs[i]->queue_num = i;
Keith Busch868f2f02015-12-17 17:08:14 -07002273
2274 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2275 free_cpumask_var(hctxs[i]->cpumask);
2276 kfree(hctxs[i]);
2277 hctxs[i] = NULL;
2278 break;
2279 }
2280 blk_mq_hctx_kobj_init(hctxs[i]);
Jens Axboe320ae512013-10-24 09:20:05 +01002281 }
Keith Busch868f2f02015-12-17 17:08:14 -07002282 for (j = i; j < q->nr_hw_queues; j++) {
2283 struct blk_mq_hw_ctx *hctx = hctxs[j];
2284
2285 if (hctx) {
Jens Axboecc71a6f2017-01-11 14:29:56 -07002286 if (hctx->tags)
2287 blk_mq_free_map_and_requests(set, j);
Keith Busch868f2f02015-12-17 17:08:14 -07002288 blk_mq_exit_hctx(q, set, hctx, j);
Keith Busch868f2f02015-12-17 17:08:14 -07002289 kobject_put(&hctx->kobj);
Keith Busch868f2f02015-12-17 17:08:14 -07002290 hctxs[j] = NULL;
2291
2292 }
2293 }
2294 q->nr_hw_queues = i;
2295 blk_mq_sysfs_register(q);
2296}
2297
2298struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2299 struct request_queue *q)
2300{
Ming Lei66841672016-02-12 15:27:00 +08002301 /* mark the queue as mq asap */
2302 q->mq_ops = set->ops;
2303
Omar Sandoval34dbad52017-03-21 08:56:08 -07002304 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
Stephen Bates720b8cc2017-04-07 06:24:03 -06002305 blk_mq_poll_stats_bkt,
2306 BLK_MQ_POLL_STATS_BKTS, q);
Omar Sandoval34dbad52017-03-21 08:56:08 -07002307 if (!q->poll_cb)
2308 goto err_exit;
2309
Keith Busch868f2f02015-12-17 17:08:14 -07002310 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2311 if (!q->queue_ctx)
Ming Linc7de5722016-05-25 23:23:27 -07002312 goto err_exit;
Keith Busch868f2f02015-12-17 17:08:14 -07002313
Ming Lei737f98c2017-02-22 18:13:59 +08002314 /* init q->mq_kobj and sw queues' kobjects */
2315 blk_mq_sysfs_init(q);
2316
Keith Busch868f2f02015-12-17 17:08:14 -07002317 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2318 GFP_KERNEL, set->numa_node);
2319 if (!q->queue_hw_ctx)
2320 goto err_percpu;
2321
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002322 q->mq_map = set->mq_map;
Keith Busch868f2f02015-12-17 17:08:14 -07002323
2324 blk_mq_realloc_hw_ctxs(set, q);
2325 if (!q->nr_hw_queues)
2326 goto err_hctxs;
Jens Axboe320ae512013-10-24 09:20:05 +01002327
Christoph Hellwig287922e2015-10-30 20:57:30 +08002328 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
Ming Leie56f6982015-07-16 19:53:22 +08002329 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
Jens Axboe320ae512013-10-24 09:20:05 +01002330
2331 q->nr_queues = nr_cpu_ids;
Jens Axboe320ae512013-10-24 09:20:05 +01002332
Jens Axboe94eddfb2013-11-19 09:25:07 -07002333 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
Jens Axboe320ae512013-10-24 09:20:05 +01002334
Jens Axboe05f1dd52014-05-29 09:53:32 -06002335 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2336 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2337
Christoph Hellwig1be036e2014-02-07 10:22:39 -08002338 q->sg_reserved_size = INT_MAX;
2339
Mike Snitzer28494502016-09-14 13:28:30 -04002340 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
Christoph Hellwig6fca6a62014-05-28 08:08:02 -06002341 INIT_LIST_HEAD(&q->requeue_list);
2342 spin_lock_init(&q->requeue_lock);
2343
Christoph Hellwig254d2592017-03-22 15:01:50 -04002344 blk_queue_make_request(q, blk_mq_make_request);
Jens Axboe07068d52014-05-22 10:40:51 -06002345
Jens Axboeeba71762014-05-20 15:17:27 -06002346 /*
2347 * Do this after blk_queue_make_request() overrides it...
2348 */
2349 q->nr_requests = set->queue_depth;
2350
Jens Axboe64f1c212016-11-14 13:03:03 -07002351 /*
2352 * Default to classic polling
2353 */
2354 q->poll_nsec = -1;
2355
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002356 if (set->ops->complete)
2357 blk_queue_softirq_done(q, set->ops->complete);
Christoph Hellwig30a91cb2014-02-10 03:24:38 -08002358
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002359 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002360 blk_mq_add_queue_tag_set(set, q);
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002361 blk_mq_map_swqueue(q);
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002362
Jens Axboed3484992017-01-13 14:43:58 -07002363 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2364 int ret;
2365
2366 ret = blk_mq_sched_init(q);
2367 if (ret)
2368 return ERR_PTR(ret);
2369 }
2370
Jens Axboe320ae512013-10-24 09:20:05 +01002371 return q;
Christoph Hellwig18741982014-02-10 09:29:00 -07002372
Jens Axboe320ae512013-10-24 09:20:05 +01002373err_hctxs:
Keith Busch868f2f02015-12-17 17:08:14 -07002374 kfree(q->queue_hw_ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01002375err_percpu:
Keith Busch868f2f02015-12-17 17:08:14 -07002376 free_percpu(q->queue_ctx);
Ming Linc7de5722016-05-25 23:23:27 -07002377err_exit:
2378 q->mq_ops = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002379 return ERR_PTR(-ENOMEM);
2380}
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002381EXPORT_SYMBOL(blk_mq_init_allocated_queue);
Jens Axboe320ae512013-10-24 09:20:05 +01002382
2383void blk_mq_free_queue(struct request_queue *q)
2384{
Ming Lei624dbe42014-05-27 23:35:13 +08002385 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002386
Jens Axboe0d2602c2014-05-13 15:10:52 -06002387 blk_mq_del_queue_tag_set(q);
Ming Lei624dbe42014-05-27 23:35:13 +08002388 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01002389}
Jens Axboe320ae512013-10-24 09:20:05 +01002390
2391/* Basically redo blk_mq_init_queue with queue frozen */
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002392static void blk_mq_queue_reinit(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01002393{
Christoph Hellwig4ecd4fe2015-05-07 09:38:13 +02002394 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
Jens Axboe320ae512013-10-24 09:20:05 +01002395
Omar Sandoval9c1051a2017-05-04 08:17:21 -06002396 blk_mq_debugfs_unregister_hctxs(q);
Jens Axboe67aec142014-05-30 08:25:36 -06002397 blk_mq_sysfs_unregister(q);
2398
Jens Axboe320ae512013-10-24 09:20:05 +01002399 /*
2400	 * Redo blk_mq_init_cpu_queues() and blk_mq_init_hw_queues(). FIXME: maybe
2401	 * we should change hctx->numa_node according to the new topology (this
2402	 * involves freeing and re-allocating memory; is it worth doing?)
2403 */
2404
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002405 blk_mq_map_swqueue(q);
Jens Axboe320ae512013-10-24 09:20:05 +01002406
Jens Axboe67aec142014-05-30 08:25:36 -06002407 blk_mq_sysfs_register(q);
Omar Sandoval9c1051a2017-05-04 08:17:21 -06002408 blk_mq_debugfs_register_hctxs(q);
Jens Axboe320ae512013-10-24 09:20:05 +01002409}
2410
Jens Axboea5164402014-09-10 09:02:03 -06002411static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2412{
2413 int i;
2414
Jens Axboecc71a6f2017-01-11 14:29:56 -07002415 for (i = 0; i < set->nr_hw_queues; i++)
2416 if (!__blk_mq_alloc_rq_map(set, i))
Jens Axboea5164402014-09-10 09:02:03 -06002417 goto out_unwind;
Jens Axboea5164402014-09-10 09:02:03 -06002418
2419 return 0;
2420
2421out_unwind:
2422 while (--i >= 0)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002423 blk_mq_free_rq_map(set->tags[i]);
Jens Axboea5164402014-09-10 09:02:03 -06002424
Jens Axboea5164402014-09-10 09:02:03 -06002425 return -ENOMEM;
2426}
2427
2428/*
2429 * Allocate the request maps associated with this tag_set. Note that this
2430 * may reduce the depth asked for, if memory is tight. set->queue_depth
2431 * will be updated to reflect the allocated depth.
2432 */
2433static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2434{
2435 unsigned int depth;
2436 int err;
2437
2438 depth = set->queue_depth;
2439 do {
2440 err = __blk_mq_alloc_rq_maps(set);
2441 if (!err)
2442 break;
2443
2444 set->queue_depth >>= 1;
2445 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2446 err = -ENOMEM;
2447 break;
2448 }
2449 } while (set->queue_depth);
2450
2451 if (!set->queue_depth || err) {
2452 pr_err("blk-mq: failed to allocate request map\n");
2453 return -ENOMEM;
2454 }
2455
2456 if (depth != set->queue_depth)
2457 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2458 depth, set->queue_depth);
2459
2460 return 0;
2461}
2462
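/*
 * Worked example for the fallback above: a driver asking for
 * queue_depth = 1024 on a memory-starved system is retried at 512, 256,
 * 128, ... until either an allocation succeeds or the depth would drop
 * below reserved_tags + BLK_MQ_TAG_MIN, at which point the whole tag set
 * allocation fails with -ENOMEM.
 */
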
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06002463static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2464{
2465 if (set->ops->map_queues)
2466 return set->ops->map_queues(set);
2467 else
2468 return blk_mq_map_queues(set);
2469}
2470
Jens Axboea4391c62014-06-05 15:21:56 -06002471/*
2472 * Alloc a tag set to be associated with one or more request queues.
2473 * May fail with EINVAL for various error conditions. May adjust the
2474 * requested depth down, if it is too large. In that case, the adjusted
2475 * value will be stored in set->queue_depth.
2476 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002477int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2478{
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002479 int ret;
2480
Bart Van Assche205fb5f2014-10-30 14:45:11 +01002481 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2482
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002483 if (!set->nr_hw_queues)
2484 return -EINVAL;
Jens Axboea4391c62014-06-05 15:21:56 -06002485 if (!set->queue_depth)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002486 return -EINVAL;
2487 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2488 return -EINVAL;
2489
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002490 if (!set->ops->queue_rq)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002491 return -EINVAL;
2492
Jens Axboea4391c62014-06-05 15:21:56 -06002493 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2494 pr_info("blk-mq: reduced tag depth to %u\n",
2495 BLK_MQ_MAX_DEPTH);
2496 set->queue_depth = BLK_MQ_MAX_DEPTH;
2497 }
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002498
Shaohua Li6637fad2014-11-30 16:00:58 -08002499 /*
2500 * If a crashdump is active, then we are potentially in a very
2501 * memory constrained environment. Limit us to 1 queue and
2502 * 64 tags to prevent using too much memory.
2503 */
2504 if (is_kdump_kernel()) {
2505 set->nr_hw_queues = 1;
2506 set->queue_depth = min(64U, set->queue_depth);
2507 }
Keith Busch868f2f02015-12-17 17:08:14 -07002508 /*
2509 * There is no use for more h/w queues than cpus.
2510 */
2511 if (set->nr_hw_queues > nr_cpu_ids)
2512 set->nr_hw_queues = nr_cpu_ids;
Shaohua Li6637fad2014-11-30 16:00:58 -08002513
Keith Busch868f2f02015-12-17 17:08:14 -07002514 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002515 GFP_KERNEL, set->numa_node);
2516 if (!set->tags)
Jens Axboea5164402014-09-10 09:02:03 -06002517 return -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002518
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002519 ret = -ENOMEM;
2520 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2521 GFP_KERNEL, set->numa_node);
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002522 if (!set->mq_map)
2523 goto out_free_tags;
2524
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06002525 ret = blk_mq_update_queue_map(set);
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002526 if (ret)
2527 goto out_free_mq_map;
2528
2529 ret = blk_mq_alloc_rq_maps(set);
2530 if (ret)
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002531 goto out_free_mq_map;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002532
Jens Axboe0d2602c2014-05-13 15:10:52 -06002533 mutex_init(&set->tag_list_lock);
2534 INIT_LIST_HEAD(&set->tag_list);
2535
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002536 return 0;
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002537
2538out_free_mq_map:
2539 kfree(set->mq_map);
2540 set->mq_map = NULL;
2541out_free_tags:
Robert Elliott5676e7b2014-09-02 11:38:44 -05002542 kfree(set->tags);
2543 set->tags = NULL;
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002544 return ret;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002545}
2546EXPORT_SYMBOL(blk_mq_alloc_tag_set);
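
/*
 * Illustrative usage sketch (not taken from this file): a driver normally
 * fills in a blk_mq_tag_set and pairs blk_mq_alloc_tag_set() with
 * blk_mq_init_queue() and blk_mq_free_tag_set(). The names my_mq_ops and
 * struct my_cmd below are hypothetical.
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 4;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(set))
 *		goto out;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		goto out;
 *	}
 */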
2547
2548void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2549{
2550 int i;
2551
Jens Axboecc71a6f2017-01-11 14:29:56 -07002552 for (i = 0; i < nr_cpu_ids; i++)
2553 blk_mq_free_map_and_requests(set, i);
Jens Axboe484b4062014-05-21 14:01:15 -06002554
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002555 kfree(set->mq_map);
2556 set->mq_map = NULL;
2557
Ming Lei981bd182014-04-24 00:07:34 +08002558 kfree(set->tags);
Robert Elliott5676e7b2014-09-02 11:38:44 -05002559 set->tags = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002560}
2561EXPORT_SYMBOL(blk_mq_free_tag_set);
2562
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002563int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2564{
2565 struct blk_mq_tag_set *set = q->tag_set;
2566 struct blk_mq_hw_ctx *hctx;
2567 int i, ret;
2568
Jens Axboebd166ef2017-01-17 06:03:22 -07002569 if (!set)
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002570 return -EINVAL;
2571
Jens Axboe70f36b62017-01-19 10:59:07 -07002572 blk_mq_freeze_queue(q);
Jens Axboe70f36b62017-01-19 10:59:07 -07002573
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002574 ret = 0;
2575 queue_for_each_hw_ctx(q, hctx, i) {
Keith Busche9137d42016-02-18 14:56:35 -07002576 if (!hctx->tags)
2577 continue;
Jens Axboebd166ef2017-01-17 06:03:22 -07002578 /*
2579 * If we're using an MQ scheduler, just update the scheduler
2580 * queue depth. This is similar to what the old code would do.
2581 */
Jens Axboe70f36b62017-01-19 10:59:07 -07002582 if (!hctx->sched_tags) {
2583 ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2584 min(nr, set->queue_depth),
2585 false);
2586 } else {
2587 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2588 nr, true);
2589 }
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002590 if (ret)
2591 break;
2592 }
2593
2594 if (!ret)
2595 q->nr_requests = nr;
2596
Jens Axboe70f36b62017-01-19 10:59:07 -07002597 blk_mq_unfreeze_queue(q);
Jens Axboe70f36b62017-01-19 10:59:07 -07002598
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002599 return ret;
2600}
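
/*
 * Note (illustrative): this is normally reached from the queue sysfs code
 * when user space rewrites the per-queue request count, e.g.
 *
 *	echo 128 > /sys/block/<dev>/queue/nr_requests
 *
 * With an I/O scheduler attached only the sched_tags depth is resized;
 * without one the new depth is clamped to the tag_set's queue_depth.
 */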
2601
Keith Busche4dc2b32017-05-30 14:39:11 -04002602static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2603 int nr_hw_queues)
Keith Busch868f2f02015-12-17 17:08:14 -07002604{
2605 struct request_queue *q;
2606
Bart Van Assche705cda92017-04-07 11:16:49 -07002607 lockdep_assert_held(&set->tag_list_lock);
2608
Keith Busch868f2f02015-12-17 17:08:14 -07002609 if (nr_hw_queues > nr_cpu_ids)
2610 nr_hw_queues = nr_cpu_ids;
2611 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2612 return;
2613
2614 list_for_each_entry(q, &set->tag_list, tag_set_list)
2615 blk_mq_freeze_queue(q);
2616
2617 set->nr_hw_queues = nr_hw_queues;
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06002618 blk_mq_update_queue_map(set);
Keith Busch868f2f02015-12-17 17:08:14 -07002619 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2620 blk_mq_realloc_hw_ctxs(set, q);
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002621 blk_mq_queue_reinit(q);
Keith Busch868f2f02015-12-17 17:08:14 -07002622 }
2623
2624 list_for_each_entry(q, &set->tag_list, tag_set_list)
2625 blk_mq_unfreeze_queue(q);
2626}
Keith Busche4dc2b32017-05-30 14:39:11 -04002627
2628void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2629{
2630 mutex_lock(&set->tag_list_lock);
2631 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2632 mutex_unlock(&set->tag_list_lock);
2633}
Keith Busch868f2f02015-12-17 17:08:14 -07002634EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
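
/*
 * Illustrative caller sketch: a driver that discovers a different number
 * of hardware queues, for instance after a controller reset, can resize
 * its tag_set in place (dev and its fields below are hypothetical):
 *
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, dev->nr_io_queues);
 *
 * Every queue sharing the tag_set is frozen, remapped and unfrozen by the
 * helpers above.
 */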
2635
Omar Sandoval34dbad52017-03-21 08:56:08 -07002636/* Enable polling stats and return whether they were already enabled. */
2637static bool blk_poll_stats_enable(struct request_queue *q)
2638{
2639 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2640 test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2641 return true;
2642 blk_stat_add_callback(q, q->poll_cb);
2643 return false;
2644}
2645
2646static void blk_mq_poll_stats_start(struct request_queue *q)
2647{
2648 /*
2649 * We don't arm the callback if polling stats are not enabled or the
2650 * callback is already active.
2651 */
2652 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2653 blk_stat_is_active(q->poll_cb))
2654 return;
2655
2656 blk_stat_activate_msecs(q->poll_cb, 100);
2657}
2658
2659static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2660{
2661 struct request_queue *q = cb->data;
Stephen Bates720b8cc2017-04-07 06:24:03 -06002662 int bucket;
Omar Sandoval34dbad52017-03-21 08:56:08 -07002663
Stephen Bates720b8cc2017-04-07 06:24:03 -06002664 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2665 if (cb->stat[bucket].nr_samples)
2666 q->poll_stat[bucket] = cb->stat[bucket];
2667 }
Omar Sandoval34dbad52017-03-21 08:56:08 -07002668}
2669
Jens Axboe64f1c212016-11-14 13:03:03 -07002670static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2671 struct blk_mq_hw_ctx *hctx,
2672 struct request *rq)
2673{
Jens Axboe64f1c212016-11-14 13:03:03 -07002674 unsigned long ret = 0;
Stephen Bates720b8cc2017-04-07 06:24:03 -06002675 int bucket;
Jens Axboe64f1c212016-11-14 13:03:03 -07002676
2677 /*
2678 * If stats collection isn't on, don't sleep but turn it on for
2679 * future users
2680 */
Omar Sandoval34dbad52017-03-21 08:56:08 -07002681 if (!blk_poll_stats_enable(q))
Jens Axboe64f1c212016-11-14 13:03:03 -07002682 return 0;
2683
2684 /*
Jens Axboe64f1c212016-11-14 13:03:03 -07002685 * As an optimistic guess, use half of the mean service time
2686 * for this type of request. We can (and should) make this smarter.
2687 * For instance, if the completion latencies are tight, we can
2688 * get closer than just half the mean. This is especially
2689 * important on devices where the completion latencies are longer
Stephen Bates720b8cc2017-04-07 06:24:03 -06002690 * than ~10 usec. We do use the stats for the relevant IO size
2691 * if available, which leads to better estimates.
Jens Axboe64f1c212016-11-14 13:03:03 -07002692 */
Stephen Bates720b8cc2017-04-07 06:24:03 -06002693 bucket = blk_mq_poll_stats_bkt(rq);
2694 if (bucket < 0)
2695 return ret;
2696
2697 if (q->poll_stat[bucket].nr_samples)
2698 ret = (q->poll_stat[bucket].mean + 1) / 2;
Jens Axboe64f1c212016-11-14 13:03:03 -07002699
2700 return ret;
2701}
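
/*
 * Worked example (illustrative): if the stats bucket for this request
 * reports a mean completion time of 20000 ns, the function above returns
 * (20000 + 1) / 2 = 10000 ns, i.e. the hybrid poll sleeps for roughly
 * half of the observed mean before starting to busy poll.
 */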
2702
Jens Axboe06426ad2016-11-14 13:01:59 -07002703static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
Jens Axboe64f1c212016-11-14 13:03:03 -07002704 struct blk_mq_hw_ctx *hctx,
Jens Axboe06426ad2016-11-14 13:01:59 -07002705 struct request *rq)
2706{
2707 struct hrtimer_sleeper hs;
2708 enum hrtimer_mode mode;
Jens Axboe64f1c212016-11-14 13:03:03 -07002709 unsigned int nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07002710 ktime_t kt;
2711
Jens Axboe64f1c212016-11-14 13:03:03 -07002712 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2713 return false;
2714
2715 /*
2716 * poll_nsec can be:
2717 *
2718 * -1: don't ever hybrid sleep
2719 * 0: use half of prev avg
2720 * >0: use this specific value
2721 */
2722 if (q->poll_nsec == -1)
2723 return false;
2724 else if (q->poll_nsec > 0)
2725 nsecs = q->poll_nsec;
2726 else
2727 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2728
2729 if (!nsecs)
Jens Axboe06426ad2016-11-14 13:01:59 -07002730 return false;
2731
2732 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2733
2734 /*
2735	 * Sleep for the target computed above: either the user-specified
2736	 * value, or roughly half of the observed mean completion time.
2737 */
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01002738 kt = nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07002739
2740 mode = HRTIMER_MODE_REL;
2741 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2742 hrtimer_set_expires(&hs.timer, kt);
2743
2744 hrtimer_init_sleeper(&hs, current);
2745 do {
2746 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2747 break;
2748 set_current_state(TASK_UNINTERRUPTIBLE);
2749 hrtimer_start_expires(&hs.timer, mode);
2750 if (hs.task)
2751 io_schedule();
2752 hrtimer_cancel(&hs.timer);
2753 mode = HRTIMER_MODE_ABS;
2754 } while (hs.task && !signal_pending(current));
2755
2756 __set_current_state(TASK_RUNNING);
2757 destroy_hrtimer_on_stack(&hs.timer);
2758 return true;
2759}
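
/*
 * Note (illustrative): q->poll_nsec is normally set through the
 * io_poll_delay queue sysfs attribute; -1 disables hybrid sleeping,
 * 0 selects the adaptive half-of-mean estimate above, and a positive
 * value requests a fixed sleep before polling.
 */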
2760
Jens Axboebbd7bb72016-11-04 09:34:34 -06002761static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2762{
2763 struct request_queue *q = hctx->queue;
2764 long state;
2765
Jens Axboe06426ad2016-11-14 13:01:59 -07002766 /*
2767 * If we sleep, have the caller restart the poll loop to reset
2768 * the state. Like for the other success return cases, the
2769 * caller is responsible for checking if the IO completed. If
2770 * the IO isn't complete, we'll get called again and will go
2771 * straight to the busy poll loop.
2772 */
Jens Axboe64f1c212016-11-14 13:03:03 -07002773 if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
Jens Axboe06426ad2016-11-14 13:01:59 -07002774 return true;
2775
Jens Axboebbd7bb72016-11-04 09:34:34 -06002776 hctx->poll_considered++;
2777
2778 state = current->state;
2779 while (!need_resched()) {
2780 int ret;
2781
2782 hctx->poll_invoked++;
2783
2784 ret = q->mq_ops->poll(hctx, rq->tag);
2785 if (ret > 0) {
2786 hctx->poll_success++;
2787 set_current_state(TASK_RUNNING);
2788 return true;
2789 }
2790
2791 if (signal_pending_state(state, current))
2792 set_current_state(TASK_RUNNING);
2793
2794 if (current->state == TASK_RUNNING)
2795 return true;
2796 if (ret < 0)
2797 break;
2798 cpu_relax();
2799 }
2800
2801 return false;
2802}
2803
2804bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2805{
2806 struct blk_mq_hw_ctx *hctx;
2807 struct blk_plug *plug;
2808 struct request *rq;
2809
2810 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2811 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2812 return false;
2813
2814 plug = current->plug;
2815 if (plug)
2816 blk_flush_plug_list(plug, false);
2817
2818 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
Jens Axboebd166ef2017-01-17 06:03:22 -07002819 if (!blk_qc_t_is_internal(cookie))
2820 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
Jens Axboe3a07bb12017-04-20 14:53:28 -06002821 else {
Jens Axboebd166ef2017-01-17 06:03:22 -07002822 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
Jens Axboe3a07bb12017-04-20 14:53:28 -06002823 /*
2824 * With scheduling, if the request has completed, we'll
2825 * get a NULL return here, as we clear the sched tag when
2826 * that happens. The request still remains valid, like always,
2827 * so we should be safe with just the NULL check.
2828 */
2829 if (!rq)
2830 return false;
2831 }
Jens Axboebbd7bb72016-11-04 09:34:34 -06002832
2833 return __blk_mq_poll(hctx, rq);
2834}
2835EXPORT_SYMBOL_GPL(blk_mq_poll);
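
/*
 * Illustrative caller sketch (not taken from this file): a polled direct
 * I/O path keeps the cookie returned at submission time and spins on it
 * until the I/O completes, e.g.
 *
 *	blk_qc_t cookie = submit_bio(bio);
 *	...
 *	while (!bio_completed)
 *		blk_mq_poll(bdev_get_queue(bdev), cookie);
 *
 * where bio_completed stands for whatever completion indicator the caller
 * uses; real callers also check IOCB_HIPRI and fall back to sleeping when
 * blk_mq_poll() returns false.
 */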
2836
Jens Axboe320ae512013-10-24 09:20:05 +01002837static int __init blk_mq_init(void)
2838{
Thomas Gleixner9467f852016-09-22 08:05:17 -06002839 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2840 blk_mq_hctx_notify_dead);
Jens Axboe320ae512013-10-24 09:20:05 +01002841 return 0;
2842}
2843subsys_initcall(blk_mq_init);