/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
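
/*
 * A brief orientation note: each CPU owns one blk_mq_ctx. Requests queued
 * from that CPU sit on rq_lists[], one list per hardware queue type, and
 * hctxs[] caches the hardware queue serving this CPU for each type, so the
 * lookup is just an array dereference, e.g. (a sketch):
 *
 *	struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_READ];
 */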

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags,
					unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
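
/*
 * The allocation helpers above are typically used in pairs; a simplified
 * sketch of what the blk-mq core does per hardware queue (error handling
 * trimmed, assuming "set" is the driver's blk_mq_tag_set):
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags, set->flags);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth) < 0)
 *		blk_mq_free_rq_map(tags, set->flags);
 *
 * Teardown runs in reverse: blk_mq_free_rqs() releases the requests, then
 * blk_mq_free_rq_map() frees the tag map itself.
 */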

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags, ctx) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue CPU context
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_HIPRI is set.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
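
/*
 * For example (a sketch, assuming the queue has poll and read maps set up):
 * a REQ_HIPRI request maps to ctx->hctxs[HCTX_TYPE_POLL], a plain read to
 * ctx->hctxs[HCTX_TYPE_READ], and everything else (writes, flushes, ...) to
 * ctx->hctxs[HCTX_TYPE_DEFAULT]. Types a driver does not map simply fall
 * back to the default hardware queues.
 */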

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-CPU software queues; they could just as well be per-node,
 * but for now the per-CPU layout is hardcoded. Note that we don't bother
 * disabling preemption, since we know the ctxs are persistent. This does mean
 * that the returned ctx may not match the CPU we are running on by the time
 * it is used.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
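
/*
 * A minimal sketch of how the two lookups above are combined on the request
 * allocation path (mirroring what the blk-mq core does):
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * Since preemption is not disabled, ctx may belong to a CPU we have since
 * migrated away from; that only affects locality, not correctness.
 */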

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
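
/*
 * The budget helpers above follow a get/set/put pattern on the dispatch
 * path; roughly (a simplified sketch of the dispatch loop):
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return;				// driver has no budget left
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	// ... and if the request cannot be issued after all:
 *	blk_mq_put_dispatch_budget(q, budget_token);
 *
 * Drivers that do not implement ->get_budget() always receive token 0, and
 * the put/set hooks then degrade to no-ops.
 */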

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
	return atomic_read(&hctx->nr_active);
}
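
/*
 * Note on the counters above: with a shared sbitmap (BLK_MQ_F_TAG_HCTX_SHARED)
 * the driver tags are shared by all hardware queues of the tag set, so the
 * in-flight count lives on the request_queue rather than on each hctx. Either
 * way, hctx_may_queue() below consults this count to enforce a fair share of
 * tags between active queues.
 */
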
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool blk_mq_get_driver_tag(struct request *rq);
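
/*
 * Driver tag lifecycle, as a rough sketch: with an I/O scheduler attached, a
 * request keeps its scheduler tag (rq->internal_tag) for its whole life but
 * only holds a driver tag (rq->tag) while the hardware owns it:
 *
 *	if (!blk_mq_get_driver_tag(rq))
 *		return;				// out of driver tags, retry later
 *	// ... hand the request to ->queue_rq(); on completion or requeue:
 *	blk_mq_put_driver_tag(rq);
 */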

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}
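
/*
 * blk_mq_clear_mq_map() resets a map so that every possible CPU points at
 * hardware queue 0. It is typically used to start from a clean slate before
 * a driver's ->map_queues() callback fills the map in, so any CPU the driver
 * does not cover still ends up with a valid, if suboptimal, mapping.
 */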

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. However, when multiple contexts
 * concurrently issue BIOs to a device, this can reorder BIO insertion relative
 * to the order in which submit_bio() was called, even if those contexts are
 * synchronized to tightly control BIO issuing order. While this is not a
 * problem with regular block devices, the reordering can cause write BIO
 * failures with zoned block devices, which require sequential write patterns
 * within each zone. Prevent this by ignoring the plug state of a BIO issuing
 * context if the target request queue is a zoned block device and the BIO to
 * plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}
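
/*
 * A minimal sketch of the intended call site in the bio submission path:
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		// batch the request onto plug->mq_list
 *	else
 *		// insert directly, preserving write order on zoned devices
 */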

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant: with a depth of 1 there is nothing
	 * left to share.
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
		users = atomic_read(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
		users = atomic_read(&hctx->tags->active_queues);
	}

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
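
/*
 * Worked example for the fair-share limit above: with a tag depth of 256 and
 * 8 active users, each queue may keep at most max((256 + 8 - 1) / 8, 4U) = 32
 * requests in flight; with 128 active users the floor of 4 tags per queue
 * applies, since (256 + 127) / 128 rounds down to 2.
 */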

#endif