#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

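/*
 * Per-CPU software queue context: requests are staged on rq_list before
 * being handed to a hardware queue. The anonymous struct keeps the lock
 * and list together on their own cacheline to limit false sharing, and
 * the completion counters sit on a separate cacheline since completions
 * may run on a different CPU than submissions.
 */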
struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		struct list_head rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;
	unsigned int index_hw;

	/* incremented at dispatch time */
	unsigned long rq_dispatched[2];
	unsigned long rq_merged;

	/* incremented at completion time */
	unsigned long ____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue *queue;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
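
/*
 * With an I/O scheduler attached, requests are allocated from
 * hctx->sched_tags and only pick up a driver tag from hctx->tags at
 * dispatch time; blk_mq_get_driver_tag() does that late assignment,
 * optionally waiting for a tag to become available.
 */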
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
			unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
			unsigned int hctx_idx, unsigned int depth);

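/*
 * A rough sketch of how these pair up (hypothetical caller, error
 * handling abbreviated): blk_mq_alloc_rq_map() allocates the tag set
 * and request pointer array for one hardware queue, and
 * blk_mq_alloc_rqs() then populates it with actual requests:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth))
 *		blk_mq_free_rq_map(tags);
 *
 * Teardown is the mirror image: blk_mq_free_rqs() first, then
 * blk_mq_free_rq_map().
 */
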
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

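/*
 * q->mq_map[] holds, for each possible CPU, the index of the hardware
 * queue that CPU dispatches to; blk_mq_map_queue() is the per-CPU
 * lookup into that table.
 */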
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

/*
 * debugfs helpers
 */
#ifdef CONFIG_BLK_DEBUG_FS
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q,
					  const char *name)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
#endif

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance; for now this is hardcoded. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that
 * we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
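
/*
 * A minimal usage sketch (hypothetical caller): blk_mq_get_ctx() pins
 * the task to its CPU via get_cpu(), so every call must be paired with
 * blk_mq_put_ctx() once the ctx is no longer needed:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	...
 *	spin_unlock(&ctx->lock);
 *
 *	blk_mq_put_ctx(ctx);
 */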

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

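/*
 * A hypothetical sketch (not verbatim kernel code) of how a caller sets
 * this up before allocating: the ctx/hctx pair is derived from the
 * current CPU, and the flags then steer blk_mq_tags_from_data() below
 * to either the scheduler tags or the driver tags:
 *
 *	struct blk_mq_alloc_data data = { .q = q, .flags = flags };
 *
 *	data.ctx = blk_mq_get_ctx(q);
 *	data.hctx = blk_mq_map_queue(q, data.ctx->cpu);
 *	rq = __blk_mq_alloc_request(&data, op);
 *
 * BLK_MQ_REQ_INTERNAL marks scheduler-internal allocations, which draw
 * from hctx->sched_tags rather than the driver-visible hctx->tags.
 */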
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

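/*
 * Not every hardware queue necessarily has software ctxs mapped to it
 * (for example while CPUs are offline, or during queue setup/teardown),
 * so dispatch paths use this to skip queues that are not usable yet.
 */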
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif