/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags,
					unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
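
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): a caller that already knows which hctx type it wants, such as a
 * polling path, can resolve the hardware queue for the current CPU directly.
 * The helper name blk_mq_example_poll_hctx() is hypothetical and it assumes
 * the device actually exposes poll queues.
 */
static inline struct blk_mq_hw_ctx *blk_mq_example_poll_hctx(struct request_queue *q)
{
	return blk_mq_map_queue_type(q, HCTX_TYPE_POLL, raw_smp_processor_id());
}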

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue the request is being submitted from
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_HIPRI is set, polling is enabled.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
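
/*
 * Illustrative example (added for exposition; the helper name is
 * hypothetical): given a software queue the caller already holds, a
 * high-priority read is routed to the poll hardware queue (assuming the
 * caller has ensured polling is enabled, per the comment above), a plain
 * read to the read queue, and everything else to the default queue.
 */
static inline struct blk_mq_hw_ctx *
blk_mq_example_hctx_for_polled_read(struct request_queue *q,
				    struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_READ | REQ_HIPRI, ctx);
}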

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
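
/*
 * Illustrative sketch (added for exposition, hypothetical helper name): the
 * submission path typically pairs the two lookups above, first picking the
 * per-cpu software queue and then mapping the bio's command flags to a
 * hardware queue through it.
 */
static inline struct blk_mq_hw_ctx *
blk_mq_example_bio_to_hctx(struct request_queue *q, struct bio *bio)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	return blk_mq_map_queue(q, bio->bi_opf, ctx);
}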

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
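
/*
 * Illustrative sketch (added for exposition; the function name and the
 * BLK_MQ_REQ_NOWAIT flag choice are examples only): request allocation
 * seeds the input fields of struct blk_mq_alloc_data, while ctx and hctx
 * are filled in later once a CPU and hardware queue have been chosen.
 */
static inline void blk_mq_example_init_alloc_data(struct blk_mq_alloc_data *data,
						  struct request_queue *q,
						  unsigned int cmd_flags)
{
	*data = (struct blk_mq_alloc_data) {
		.q		= q,
		.flags		= BLK_MQ_REQ_NOWAIT,	/* example allocation flag */
		.cmd_flags	= cmd_flags,
		/* .ctx and .hctx stay NULL until the allocator picks them */
	};
}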

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return true;
}
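
/*
 * Illustrative sketch (added for exposition, hypothetical function name):
 * dispatch-side code brackets the actual issue attempt with a budget
 * get/put pair, handing the budget back whenever the request could not be
 * sent to the driver.
 */
static inline bool blk_mq_example_dispatch_one(struct request_queue *q,
					       bool driver_busy)
{
	if (!blk_mq_get_dispatch_budget(q))
		return false;		/* no budget: re-run the queue later */

	if (driver_busy) {
		/* ->queue_rq() would have failed: return the budget */
		blk_mq_put_dispatch_budget(q);
		return false;
	}

	/* request issued; the budget stays consumed until completion */
	return true;
}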

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the BIO can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}
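
/*
 * Illustrative example (added for exposition, hypothetical helper name):
 * submit-path code is expected to go through blk_mq_plug() rather than
 * dereferencing current->plug directly, so that writes aimed at zoned
 * devices automatically bypass plugging and keep their issuing order.
 */
static inline bool blk_mq_example_bio_may_plug(struct request_queue *q,
					       struct bio *bio)
{
	return blk_mq_plug(q, bio) != NULL;
}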

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
		users = atomic_read(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
		users = atomic_read(&hctx->tags->active_queues);
	}

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
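
/*
 * Worked example for the fair-share check above (illustrative numbers only):
 * with a bitmap depth of 256 and 4 active queues, each queue may have up to
 * max((256 + 4 - 1) / 4, 4) = 64 requests in flight; with 128 active queues
 * the ceiling share is 2, which is clamped up to the minimum of 4.
 */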


#endif