/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
#include <linux/configfs.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;

	struct nullb_cmd *cmds;
};

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
};

struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	unsigned long flags; /* device flags */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	bool use_lightnvm; /* register as a LightNVM device */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
};

struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_use_lightnvm;
module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_use_lightnvm,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	null_free_dev(to_nullb_device(item));
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};
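
/*
 * Sketch of the configfs interface exposed by this subsystem, assuming
 * configfs is mounted at the usual /sys/kernel/config and using an
 * arbitrary device name ("mydev"):
 *
 *   mkdir /sys/kernel/config/nullb/mydev      # allocates a nullb_device
 *   echo 4096 > /sys/kernel/config/nullb/mydev/blocksize
 *   echo 2    > /sys/kernel/config/nullb/mydev/submit_queues
 *
 * Each attribute file maps to one nullb_device field through
 * NULLB_DEVICE_ATTR; once NULLB_DEV_FL_CONFIGURED is set the store
 * handlers return -EBUSY, so attributes can only be changed before the
 * device is configured. Removing the directory drops the config_item and
 * frees the device via nullb_device_release().
 */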

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->use_lightnvm = g_use_lightnvm;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

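/*
 * Lock-free tag allocation: find_first_zero_bit() proposes a free tag and
 * test_and_set_bit_lock() claims it atomically; if another CPU wins the
 * race the loop simply retries. -1U signals that every tag in the queue is
 * currently in use.
 */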
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, BLK_STS_OK);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, BLK_STS_OK);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (cmd->nq->dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

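/*
 * Map the submitting CPU to one of the queues, spreading CPUs evenly.
 * For example, with nr_cpu_ids == 8 and nr_queues == 3 the divisor is
 * (8 + 2) / 3 == 3, so CPUs 0-2 use queue 0, CPUs 3-5 use queue 1, and
 * CPUs 6-7 use queue 2.
 */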
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_STS_OK;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
	rqd->error = status ? -EIO : 0;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	struct nullb *nullb = dev->q->queuedata;
	sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, nullb->dev->blocksize); /* convert size to pages */
	size >>= 8; /* convert size to pgs per blk */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = nullb->dev->blocksize;
	grp->csecs = nullb->dev->blocksize;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = nullb->dev->hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	list_del_init(&nullb->list);

	if (dev->use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	if (!dev->use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
	dev->nullb = NULL;
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(nullb->dev->submit_queues *
		sizeof(struct nullb_queue), GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

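/*
 * Initialize a blk-mq tag set, sized either from the per-device
 * configuration or, when no nullb is given (the shared_tags path at module
 * load), from the module-wide g_* defaults.
 */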
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = NULL;

	if (nullb->dev->blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
						dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (dev->use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	null_free_dev(dev);
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_use_lightnvm && g_bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		g_bs = 4096;
	}

	if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		g_queue_mode = NULL_Q_MQ;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	if (g_use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev)
			goto err_dev;
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");