#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

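/*
 * Per-command state: a command wraps either a struct request (rq and
 * blk-mq modes) or a bio (bio mode), plus an hrtimer for completions
 * deferred by irqmode=2.
 */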
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE = 0,
	NULL_IRQ_SOFTIRQ = 1,
	NULL_IRQ_TIMER = 2,
};

enum {
	NULL_Q_BIO = 0,
	NULL_Q_RQ = 1,
	NULL_Q_MQ = 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set = null_set_queue_mode,
	.get = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool blocking;
module_param(blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set = null_set_irqmode,
	.get = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

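/*
 * Example (hypothetical invocation): two 4GB blk-mq devices completing
 * requests from a timer:
 *
 *	modprobe null_blk gb=4 nr_devices=2 queue_mode=2 irqmode=2
 */

/*
 * Tag handling for the bio and rq queue modes: each queue tracks free
 * tags in a plain bitmap and wakes sleepers on its waitqueue when a tag
 * is returned. blk-mq mode uses the block layer's own tag allocator.
 */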
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

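/* Allocate a command, optionally sleeping until a tag becomes free. */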
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

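/*
 * Complete a command according to the active queue_mode. In rq mode the
 * queue may have been stopped for lack of tags, so restart it now that
 * one is being freed.
 */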
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, BLK_STS_OK);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, BLK_STS_OK);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

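/* Defer completion by completion_nsec using the per-command hrtimer. */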
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

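/*
 * Map the submitting CPU to a queue by splitting the CPU id space evenly
 * across the available queues (bio and rq modes only; blk-mq does its
 * own mapping).
 */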
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

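/*
 * blk-mq submission path: the command lives in the request's PDU, so
 * there is no driver-side tag allocation here.
 */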
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_STS_OK;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq = null_queue_rq,
	.complete = null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

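/*
 * LightNVM (open-channel SSD) emulation: with use_lightnvm the device
 * registers with the lightnvm subsystem instead of exposing a gendisk.
 */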
#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
	rqd->error = status ? -EIO : 0;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

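/*
 * Report a synthetic geometry derived from the gb and bs parameters:
 * a single channel with 256 pages per block, and LUN/block counts
 * scaled so the totals match the configured capacity.
 */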
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, bs); /* convert size to pages */
	size >>= 8; /* convert size to blocks, 256 pages per block */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity = null_lnvm_id,
	.submit_io = null_lnvm_submit_io,

	.create_dma_pool = null_lnvm_create_dma_pool,
	.destroy_dma_pool = null_lnvm_destroy_dma_pool,
	.dev_dma_alloc = null_lnvm_dev_dma_alloc,
	.dev_dma_free = null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect = 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner = THIS_MODULE,
	.open = null_open,
	.release = null_release,
};

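/* Per-queue initialization shared by the blk-mq and legacy setup paths. */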
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		return -ENOMEM;
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

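/*
 * Each device gets its own tag set unless shared_tags is set, in which
 * case every device attaches to the module-wide tag_set initialized at
 * load time.
 */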
static int null_init_tag_set(struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = submit_queues;
	set->queue_depth = hw_queue_depth;
	set->numa_node = home_node;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = NULL;

	if (blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

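/* Create and register one device according to the module parameters. */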
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
				nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	if (queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(&tag_set);
		if (ret)
			return ret;
	}

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_tagset;
	}

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
						0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 888 | pr_info("null: module loaded\n"); |
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
err_tagset:
	if (queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	if (queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");