// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

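/*
 * Release handler for the queue's blk_mq_ctxs kobject (the "mq" directory):
 * frees the per-CPU software queue array and the blk_mq_ctxs container.
 */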
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

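/*
 * Release handler for a software queue (blk_mq_ctx) kobject: drops the
 * reference it holds on the parent blk_mq_ctxs kobject.
 */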
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

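/*
 * Release handler for a hardware queue (blk_mq_hw_ctx) kobject: tears down
 * the SRCU state of blocking queues, then frees the flush queue, the ctx
 * map, the CPU mask, the ctx pointer array and the hctx itself.
 */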
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

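/*
 * Attribute descriptors: each sysfs file is backed by an entry pairing the
 * attribute with typed show/store callbacks for either a software queue
 * (blk_mq_ctx) or a hardware queue (blk_mq_hw_ctx).
 */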
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

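/*
 * Generic sysfs_ops dispatchers: recover the entry and the ctx/hctx from the
 * attribute and kobject, then call the typed show/store callback while
 * holding the queue's sysfs_lock.
 */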
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

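/*
 * Per-hctx attributes exposing the number of tags and reserved tags in the
 * hardware queue's tag set.
 */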
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

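/*
 * "cpu_list" attribute: print the comma-separated list of CPUs mapped to
 * this hardware queue, stopping early if the output would exceed one page.
 */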
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

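/* Attribute instances, the default attribute group and the kobj_types. */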
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release = blk_mq_hw_sysfs_release,
};

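/* Remove a hardware queue and all of its software queues from sysfs. */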
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

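/*
 * Add a hardware queue kobject under the queue's "mq" directory and a
 * "cpuN" child for each software queue mapped to it.
 */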
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

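/*
 * Tear down the whole "mq" sysfs hierarchy for a queue and drop the device
 * reference taken at registration time. Caller must hold q->sysfs_dir_lock.
 */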
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

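/* Initialize a hardware queue kobject with the hctx ktype. */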
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

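/*
 * Undo blk_mq_sysfs_init(): drop the initial reference on every software
 * queue kobject and on the "mq" kobject itself.
 */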
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

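/*
 * Initialize the "mq" kobject and every per-CPU software queue kobject;
 * each software queue takes an extra reference on the "mq" kobject, which
 * is dropped from its release handler.
 */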
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

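/*
 * Create the "mq" directory under the block device and register every
 * hardware queue beneath it, unwinding on failure. Caller must hold
 * q->sysfs_dir_lock.
 */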
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

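/*
 * Remove all hardware queue directories; a no-op unless full registration
 * has completed (mq_sysfs_init_done).
 */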
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

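/*
 * (Re-)register all hardware queue directories; a no-op unless full
 * registration has completed.
 */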
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}