/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
					struct request *rq,
					blk_qc_t *cookie,
					bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags, type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_HIPRI is set, poll queues are
	 * enabled.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}

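/*
 * Minimal usage sketch (illustrative only, not a declaration in this header):
 * the submit path looks up the software queue for the current CPU and then
 * the matching hardware queue for the request's command flags, e.g.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * REQ_HIPRI requests are routed via HCTX_TYPE_POLL, reads via HCTX_TYPE_READ,
 * and everything else via HCTX_TYPE_DEFAULT.
 */
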
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

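/*
 * Typical pairing (sketch): blk_mq_get_ctx() pins the caller to the current
 * CPU via get_cpu(), so the section in between must not sleep and must be
 * closed with blk_mq_put_ctx():
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... short, non-sleeping use of ctx ...
 *	blk_mq_put_ctx(ctx);
 */
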
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

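/*
 * Requests allocated with BLK_MQ_REQ_INTERNAL (i.e. through an I/O scheduler)
 * come from the per-hctx scheduler tags; all other requests use the driver
 * tags.
 */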
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

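/* In-flight request accounting for a partition (total and per-direction). */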
unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

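/*
 * Dispatch-budget pattern (sketch of how callers use the helpers above):
 * budget is taken before a request is handed to the driver and given back if
 * the request cannot be dispatched after all, e.g.
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 *	... pass rq to ->queue_rq() ...
 */
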
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

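/* Reset a queue map so that every possible CPU points at hardware queue 0. */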
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif