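/*
 * null_blk: a block device driver that completes all I/O without
 * performing any data transfer. Useful for benchmarking and stress
 * testing the block layer (bio-based, legacy request, and blk-mq
 * paths) with minimal driver-side overhead.
 */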
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

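/*
 * Completion modes: finish a command inline in the submission path, via
 * the block layer softirq, or from an hrtimer after completion_nsec.
 */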
enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

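/*
 * Per-queue tag management: tags index into nq->cmds and are tracked in
 * a bitmap. get_tag() claims a free bit locklessly and retries if it
 * races with another claimer; put_tag() releases the bit and wakes any
 * submitter sleeping in alloc_cmd().
 */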
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

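/*
 * Complete a command with the primitive matching the active queue_mode.
 * blk-mq manages its own tags, so only the bio and legacy request modes
 * release a driver-side tag via free_cmd().
 */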
218static void end_cmd(struct nullb_cmd *cmd)
219{
Christoph Hellwigce2c3502014-02-10 03:24:40 -0800220 switch (queue_mode) {
221 case NULL_Q_MQ:
Christoph Hellwigc8a446a2014-09-13 16:40:10 -0700222 blk_mq_end_request(cmd->rq, 0);
Christoph Hellwigce2c3502014-02-10 03:24:40 -0800223 return;
224 case NULL_Q_RQ:
225 INIT_LIST_HEAD(&cmd->rq->queuelist);
226 blk_end_request_all(cmd->rq, 0);
227 break;
228 case NULL_Q_BIO:
Christoph Hellwig4246a0b2015-07-20 15:29:37 +0200229 bio_endio(cmd->bio);
Christoph Hellwigce2c3502014-02-10 03:24:40 -0800230 break;
231 }
Jens Axboef2298c02013-10-25 11:52:25 +0100232
Christoph Hellwigce2c3502014-02-10 03:24:40 -0800233 free_cmd(cmd);
Jens Axboef2298c02013-10-25 11:52:25 +0100234}
235
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	if (q && !q->mq_ops && blk_queue_stopped(q)) {
		spin_lock(q->queue_lock);
		if (blk_queue_stopped(q))
			blk_start_queue(q);
		spin_unlock(q->queue_lock);
	}
	end_cmd(cmd);

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = ktime_set(0, completion_nsec);

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

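/*
 * Spread submitting CPUs evenly across the device's queues.
 */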
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

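/*
 * make_request entry point for the bio-based mode: grab a command
 * (sleeping until a tag frees up), attach the bio, and complete it
 * according to irqmode. No data is ever transferred.
 */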
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

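/*
 * blk-mq ->queue_rq handler. The command is carried in the request PDU
 * (see tag_set.cmd_size), so no driver-side tag allocation is needed;
 * in timer mode the per-command hrtimer is initialized here before
 * null_handle_cmd() arms it.
 */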
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

#ifdef CONFIG_NVM

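/*
 * LightNVM (open-channel SSD) emulation: instead of a gendisk, null_blk
 * can register with the nvm subsystem, advertising a synthetic geometry
 * and completing all target I/O without touching any media.
 */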
static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	dev->mt->end_io(rqd, error);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

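/*
 * Report a synthetic device identity: one channel, single-plane, 256
 * pages per block, with the configured gb capacity split into at most
 * 64K blocks per LUN, plus NAND-like timing values.
 */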
static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x3;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	do_div(size, bs); /* convert size to pages */
	do_div(size, 256); /* convert size to blocks (256 pages per block) */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	do_div(size, (1 << 16));
	grp->num_lun = size + 1;
	do_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

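/*
 * Allocate a queue's command array and tag bitmap. Only the bio and
 * legacy request modes use these; in blk-mq mode the commands live in
 * the request PDUs and blk-mq does the tagging.
 */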
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

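/*
 * Create one nullb instance: set up the submission queues, build the
 * request queue for the selected queue_mode (blk-mq, bio, or legacy
 * request), then register either a LightNVM device or a gendisk.
 */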
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm) {
		rv = nvm_register(nullb->q, nullb->disk_name,
				  &null_lnvm_dev_ops);
		if (rv)
			goto out_cleanup_blk_queue;
		goto done;
	}

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_lightnvm;
	}
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
done:
	return 0;

out_cleanup_lightnvm:
	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

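/*
 * Module init: sanitize the block size, LightNVM, and submit_queues
 * parameters, register the block major, then create nr_devices
 * instances.
 */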
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaulting block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaulting queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			unregister_blkdev(null_major, "nullb");
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			goto err_ppa;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
err_ppa:
	kmem_cache_destroy(ppa_cache);
	return -EINVAL;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");