// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

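/*
 * sysfs support for blk-mq: each request queue gets an "mq" directory under
 * its gendisk device (normally /sys/block/<disk>/mq/), containing one
 * directory per hardware queue with a cpuN subdirectory for every software
 * queue mapped to it.
 */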
static void blk_mq_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

        free_percpu(ctxs->queue_ctx);
        kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

        /* ctx->ctxs won't be released until all ctx are freed */
        kobject_put(&ctx->ctxs->kobj);
}

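/*
 * Final release for a hardware queue's kobject: free the per-hctx resources
 * (SRCU state for blocking queues, flush queue, ctx map, cpumask) and the
 * hctx itself.
 */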
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);

        if (hctx->flags & BLK_MQ_F_BLOCKING)
                cleanup_srcu_struct(hctx->srcu);
        blk_free_flush_queue(hctx->fq);
        sbitmap_free(&hctx->ctx_map);
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
}

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

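/*
 * Generic show/store dispatchers for the hardware-queue attributes below.
 * They resolve the embedded kobject back to its blk_mq_hw_ctx and call the
 * attribute's handler under q->sysfs_lock.
 */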
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
                                                     char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

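/*
 * Format the CPUs mapped to this hardware queue as a comma-separated list,
 * taking care not to overflow the single PAGE_SIZE sysfs buffer.
 */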
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        const size_t size = PAGE_SIZE - 1;
        unsigned int i, first = 1;
        int ret = 0, pos = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret = snprintf(pos + page, size - pos, "%u", i);
                else
                        ret = snprintf(pos + page, size - pos, ", %u", i);

                if (ret >= size - pos)
                        break;

                first = 0;
                pos += ret;
        }

        ret = snprintf(pos + page, size + 1 - pos, "\n");
        return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
        .attr = {.name = "nr_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
        .attr = {.name = "nr_reserved_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = 0444 },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_nr_tags.attr,
        &blk_mq_hw_sysfs_nr_reserved_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show = blk_mq_hw_sysfs_show,
        .store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .release = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops = &blk_mq_hw_sysfs_ops,
        .default_groups = default_hw_ctx_groups,
        .release = blk_mq_hw_sysfs_release,
};

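/* Remove the per-ctx "cpuN" directories and the hctx directory itself. */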
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

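/*
 * Add the hctx directory (named after its queue number) under the queue's
 * "mq" kobject, then add one "cpuN" child per software queue mapped to it.
 */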
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

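/*
 * Tear down the whole "mq" directory tree for a queue.  The caller must hold
 * q->sysfs_dir_lock.
 */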
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        lockdep_assert_held(&q->sysfs_dir_lock);

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);
        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

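/*
 * Release the per-cpu ctx kobjects and the queue's "mq" kobject that were
 * set up by blk_mq_sysfs_init().
 */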
void blk_mq_sysfs_deinit(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_put(&ctx->kobj);
        }
        kobject_put(q->mq_kobj);
}

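/*
 * Initialise the queue's "mq" kobject and one kobject per possible CPU's
 * software queue.  Each ctx kobject takes a reference on the "mq" kobject,
 * which blk_mq_ctx_sysfs_release() drops again when the ctx is freed.
 */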
void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);

                kobject_get(q->mq_kobj);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

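/*
 * Register the "mq" directory and all hardware queue directories with sysfs.
 * On failure, anything already registered is rolled back.  The caller must
 * hold q->sysfs_dir_lock.
 */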
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        WARN_ON_ONCE(!q->kobj.parent);
        lockdep_assert_held(&q->sysfs_dir_lock);

        ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        goto unreg;
        }

        q->mq_sysfs_init_done = true;

out:
        return ret;

unreg:
        while (--i >= 0)
                blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);
        kobject_put(&dev->kobj);
        return ret;
}

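/*
 * Remove only the per-hctx directories; the queue's "mq" kobject itself stays
 * registered.
 */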
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

unlock:
        mutex_unlock(&q->sysfs_dir_lock);
}

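/*
 * Re-add a directory for every hardware queue, the counterpart of
 * blk_mq_sysfs_unregister().  Both are no-ops until __blk_mq_register_dev()
 * has completed successfully.
 */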
int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

unlock:
        mutex_unlock(&q->sysfs_dir_lock);

        return ret;
}