/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
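
/*
 * Illustrative sketch (not part of this header): each software queue keeps
 * one request list per hctx type, all serialized by the same lock, so
 * queueing a request of a given type looks roughly like:
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
 *	spin_unlock(&ctx->lock);
 */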

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
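
/*
 * Illustrative pairing (a sketch, not upstream code) for populating and
 * tearing down one hardware queue's tags and requests:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_map_and_rqs(set, tags, hctx_idx);
 */
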
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
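
/*
 * Rough sketch (an assumption from the names, not upstream code):
 * __blk_mq_insert_request() files a request on its software queue, while
 * blk_mq_request_bypass_insert() skips the sw queue and can kick the
 * hardware queue right away:
 *
 *	__blk_mq_insert_request(hctx, rq, false);
 *	blk_mq_request_bypass_insert(rq, false, true);
 */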

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
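
/*
 * Example (illustrative): find the hardware queue serving reads issued from
 * the current CPU, assuming the device set up a separate read map:
 *
 *	struct blk_mq_hw_ctx *hctx =
 *		blk_mq_map_queue_type(q, HCTX_TYPE_READ, raw_smp_processor_id());
 */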

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (flags & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
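
/*
 * Worked example (illustrative): a polled read maps to the poll queues, a
 * plain read to the read queues, anything else to the default queues:
 *
 *	blk_mq_map_queue(q, REQ_OP_READ | REQ_POLLED, ctx)  ->  poll hctx
 *	blk_mq_map_queue(q, REQ_OP_READ, ctx)               ->  read hctx
 *	blk_mq_map_queue(q, REQ_OP_WRITE, ctx)              ->  default hctx
 */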

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
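
/*
 * Usage note (illustrative): since preemption is not disabled, the returned
 * ctx may belong to a CPU the task has since migrated away from; per the
 * comment above that is harmless because the ctx's are persistent:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 */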

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
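
/*
 * Illustrative initialization (a sketch, not upstream code): a submission
 * path fills in the inputs and lets the allocator derive ctx and hctx:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.cmd_flags	= bio->bi_opf,
 *		.nr_tags	= 1,
 *	};
 */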

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
			      struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
			 unsigned int inflight[2]);
Jens Axboef299b7c2017-08-08 17:51:45 -0600189
Ming Lei2a5a24a2021-01-22 10:33:12 +0800190static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
191 int budget_token)
Ming Leide148292017-10-14 17:22:29 +0800192{
Ming Leide148292017-10-14 17:22:29 +0800193 if (q->mq_ops->put_budget)
Ming Lei2a5a24a2021-01-22 10:33:12 +0800194 q->mq_ops->put_budget(q, budget_token);
Ming Leide148292017-10-14 17:22:29 +0800195}
196
Ming Lei2a5a24a2021-01-22 10:33:12 +0800197static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
Ming Leide148292017-10-14 17:22:29 +0800198{
Ming Leide148292017-10-14 17:22:29 +0800199 if (q->mq_ops->get_budget)
Ming Lei65c76362020-06-30 18:24:56 +0800200 return q->mq_ops->get_budget(q);
Ming Lei2a5a24a2021-01-22 10:33:12 +0800201 return 0;
202}
203
204static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
205{
206 if (token < 0)
207 return;
208
209 if (rq->q->mq_ops->set_rq_budget_token)
210 rq->q->mq_ops->set_rq_budget_token(rq, token);
211}
212
213static inline int blk_mq_get_rq_budget_token(struct request *rq)
214{
215 if (rq->q->mq_ops->get_rq_budget_token)
216 return rq->q->mq_ops->get_rq_budget_token(rq);
217 return -1;
Ming Leide148292017-10-14 17:22:29 +0800218}
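
/*
 * Dispatch-budget pattern (an illustrative sketch, not upstream code): a
 * driver-provided budget gates each dispatch attempt; a negative token
 * means no budget is available and dispatch should be retried later, and
 * the budget must be handed back if the request is not actually issued:
 *
 *	int token = blk_mq_get_dispatch_budget(q);
 *
 *	if (token < 0)
 *		return false;
 *	blk_mq_set_rq_budget_token(rq, token);
 *	...
 *	blk_mq_put_dispatch_budget(q, token);
 */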

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_dec(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
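
/*
 * Tag life-cycle sketch (illustrative): with a scheduler attached, a
 * request holds its internal_tag from allocation time but only acquires a
 * driver tag when it is about to be issued to hardware, and returns it on
 * completion or requeue:
 *
 *	if (!blk_mq_get_driver_tag(rq))
 *		return false;
 *	...
 *	blk_mq_put_driver_tag(rq);
 */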

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}
284
Damien Le Moalb49773e72019-07-11 01:18:31 +0900285/*
286 * blk_mq_plug() - Get caller context plug
287 * @q: request queue
288 * @bio : the bio being submitted by the caller context
289 *
290 * Plugging, by design, may delay the insertion of BIOs into the elevator in
291 * order to increase BIO merging opportunities. This however can cause BIO
292 * insertion order to change from the order in which submit_bio() is being
293 * executed in the case of multiple contexts concurrently issuing BIOs to a
294 * device, even if these context are synchronized to tightly control BIO issuing
295 * order. While this is not a problem with regular block devices, this ordering
296 * change can cause write BIO failures with zoned block devices as these
297 * require sequential write patterns to zones. Prevent this from happening by
298 * ignoring the plug state of a BIO issuing context if the target request queue
299 * is for a zoned block device and the BIO to plug is a write operation.
300 *
301 * Return current->plug if the bio can be plugged and NULL otherwise
302 */
303static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
304 struct bio *bio)
305{
306 /*
307 * For regular block devices or read operations, use the context plug
308 * which may be NULL if blk_start_plug() was not executed.
309 */
310 if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
311 return current->plug;
312
313 /* Zoned block device write operation case: do not plug the BIO */
314 return NULL;
315}
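
/*
 * Usage sketch (an assumption about callers, not upstream code): submission
 * paths consult blk_mq_plug() instead of reading current->plug directly, so
 * writes to zoned devices bypass plugging automatically:
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		... attempt merges against the plugged requests ...
 */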

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}
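
/*
 * Illustrative error-path use: callers that collected requests on a private
 * list can unwind with one call:
 *
 *	LIST_HEAD(rq_list);
 *	...
 *	if (ret)
 *		blk_mq_free_requests(&rq_list);
 */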

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
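
/*
 * Worked example (illustrative): with 256 shared tags and 8 active users,
 * each user may have up to (256 + 8 - 1) / 8 = 32 requests in flight; with
 * a depth of 16 and 8 users the 4-tag floor wins:
 *
 *	max((16 + 8 - 1) / 8, 4U) == max(2, 4U) == 4
 */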

#endif