#include &lt;linux/module.h&gt;

#include &lt;linux/moduleparam.h&gt;
#include &lt;linux/sched.h&gt;
#include &lt;linux/fs.h&gt;
#include &lt;linux/blkdev.h&gt;
#include &lt;linux/init.h&gt;
#include &lt;linux/slab.h&gt;
#include &lt;linux/blk-mq.h&gt;
#include &lt;linux/hrtimer.h&gt;
#include &lt;linux/lightnvm.h&gt;

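/*
 * Per-IO command state. In blk-mq mode this lives in the request pdu;
 * in bio and rq mode it comes from the per-queue pre-allocated cmds[] array.
 */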
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

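/*
 * A submission queue. tag_map tracks which entries of cmds[] are in
 * flight; alloc_cmd() sleepers wait on @wait until a tag is released.
 */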
struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

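/* Per-device state; one instance per registered null block device. */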
struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

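/* Completion modes: inline, deferred to softirq, or delayed by an hrtimer. */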
enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

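/* Block-layer interfaces the device can register: bio-based, legacy request, or blk-mq. */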
enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

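/*
 * Tag management for the bio/rq paths: a find-first-zero bitmap per queue.
 * get_tag() loops until it wins the test_and_set race; put_tag() clears the
 * bit and wakes anyone sleeping in alloc_cmd().
 */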
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

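/*
 * Complete a command on whichever path it arrived on. Only the bio and rq
 * modes hold a driver tag that must be released here; blk-mq returns early
 * since its tags belong to the tag set.
 */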
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

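/*
 * "Handle" a command: no data is transferred, the command is simply
 * completed through the configured irqmode.
 */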
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

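/* Map the submitting CPU onto one of the device's queues. */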
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

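/* blk-mq submission path: the command is embedded in the request pdu. */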
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->error = error;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

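/*
 * Report a fabricated device geometry: a single channel, single plane,
 * 256 pages per block, with the configured size split across LUNs.
 */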
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, bs); /* convert size to pages */
	size >>= 8; /* convert size to blocks: 256 pages per block */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
							dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

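/*
 * Pre-allocate the per-queue command array and tag bitmap used by the
 * bio and rq paths (blk-mq allocates commands through its tag set instead).
 */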
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		return -ENOMEM;
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

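/*
 * Create and register one device: allocate queues for the configured
 * queue_mode, set up the request queue, then expose it either as a
 * gendisk or as a LightNVM device.
 */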
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

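/*
 * Module entry point: sanitize parameter combinations (block size,
 * LightNVM requirements, queue counts), then create nr_devices devices.
 */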
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");