/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
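
/*
 * Illustrative pairing (a sketch, not copied from the callers in blk-mq.c):
 * code that sizes or resizes a tag map typically allocates and frees it
 * together, where "set", "hctx_idx" and "depth" are assumed to come from
 * the caller:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_map_and_rqs(set, tags, hctx_idx);
 */
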
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue a request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller must ensure that polling is enabled on the queue if
	 * REQ_POLLED is set.
	 */
	if (flags & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}
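
/*
 * Illustrative mapping (a sketch, not upstream documentation) of a few
 * command-flag combinations to the type chosen by blk_mq_get_hctx_type():
 *
 *	REQ_OP_WRITE			-> HCTX_TYPE_DEFAULT
 *	REQ_OP_READ			-> HCTX_TYPE_READ
 *	REQ_OP_READ | REQ_POLLED	-> HCTX_TYPE_POLL
 *
 * Queues that did not set up dedicated read or poll hardware queues fall
 * back to the default mapping for those types.
 */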

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(flags)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
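
/*
 * A minimal usage sketch (illustrative; the real callers live in blk-mq.c and
 * blk-mq-sched.c): submission-side code pairs the two helpers above to go
 * from the current CPU to the hardware queue serving a bio's command flags.
 * "q" and "bio" are assumed to be provided by the caller.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 */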

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
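
/*
 * Illustrative initialization (a sketch of the pattern used by the request
 * allocation paths in blk-mq.c, not a copy of them): most fields default to
 * zero and only the relevant inputs are filled in, e.g.
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.flags		= flags,
 *		.cmd_flags	= opf,
 *		.nr_tags	= 1,
 *	};
 *
 * "q", "flags" and "opf" are assumed to come from the caller.
 */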

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
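
/*
 * Dispatch-side budget handling, as an illustrative sketch (the real logic is
 * in blk-mq.c and blk-mq-sched.c): a budget token is taken before issuing a
 * request, recorded on the request on success, and returned on failure.
 *
 *	budget_token = blk_mq_get_dispatch_budget(q);
 *	if (budget_token < 0)
 *		return;			// no budget, try again later
 *	...
 *	if (!dispatched)		// "dispatched" is a stand-in condition
 *		blk_mq_put_dispatch_budget(q, budget_token);
 *	else
 *		blk_mq_set_rq_budget_token(rq, budget_token);
 */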

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
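
/*
 * How the driver-tag get/put pair is typically used (an illustrative sketch,
 * not the exact code in blk-mq.c): a request must hold a driver tag while it
 * is owned by the low-level driver, and gives it back if the driver cannot
 * accept the request right now.
 *
 *	if (!blk_mq_get_driver_tag(rq))
 *		goto out;		// "out" is a stand-in for the requeue path
 *	ret = q->mq_ops->queue_rq(hctx, &bd);
 *	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
 *		blk_mq_put_driver_tag(rq);	// retried once resources free up
 */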

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio : the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices, this
 * ordering change can cause write BIO failures with zoned block devices as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the target
 * request queue is for a zoned block device and the BIO to plug is a write
 * operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}
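
/*
 * Plug usage from a submitter's point of view (illustrative; blk_start_plug()
 * and blk_finish_plug() are the public interfaces, while blk_mq_plug() above
 * is what the submission path consults internally):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio);	// "bio" is assumed to be set up by the caller
 *	submit_bio(bio2);	// consecutive bios may be merged and batched
 *	blk_finish_plug(&plug);	// flushes the plugged requests to the device
 */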

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
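
/*
 * Worked example of the fair-share calculation above (illustrative numbers):
 * with a 256-deep shared bitmap and 3 active users,
 *
 *	depth = max((256 + 3 - 1) / 3, 4U) = max(86, 4) = 86
 *
 * so each queue may have at most 86 requests in flight before
 * hctx_may_queue() starts returning false; the 4U floor guarantees a minimum
 * share even when many users are active.
 */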

#endif