/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
#include <linux/configfs.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;

	struct nullb_cmd *cmds;
};

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
};

struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	unsigned long flags; /* device flags */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	bool use_lightnvm; /* register as a LightNVM device */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
};

struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_use_lightnvm;
module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)					\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count)				\
{									\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
		return -EBUSY;						\
	return nullb_device_##TYPE##_attr_store(			\
			&to_nullb_device(item)->NAME, page, count);	\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_use_lightnvm,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	null_free_dev(to_nullb_device(item));
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->use_lightnvm = g_use_lightnvm;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	kfree(dev);
}

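/*
 * Tag handling for the bio and legacy request-based paths: tags live in a
 * per-queue bitmap and commands come from the queue's preallocated cmds[]
 * array. The blk-mq path uses the block layer's own tags instead.
 */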
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, BLK_STS_OK);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, BLK_STS_OK);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (cmd->nq->dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

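/* Map the submitting CPU onto one of the device's queues. */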
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_STS_OK;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
	rqd->error = status ? -EIO : 0;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	struct nullb *nullb = dev->q->queuedata;
	sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, nullb->dev->blocksize); /* convert size to pages */
	size >>= 8; /* convert size to pages per block */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = nullb->dev->blocksize;
	grp->csecs = nullb->dev->blocksize;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = nullb->dev->hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	list_del_init(&nullb->list);

	if (dev->use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	if (!dev->use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
	dev->nullb = NULL;
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

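/* Bind each active blk-mq hardware context to one of the driver's queues. */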
static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

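/* Allocate the per-device queue array; the queues themselves are initialized later. */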
static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(nullb->dev->submit_queues *
		sizeof(struct nullb_queue), GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

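/*
 * Fill in a blk-mq tag set, taking values from the device when one exists and
 * falling back to the module-wide defaults for the shared tag set.
 */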
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 958 | static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 959 | { |
| 960 | set->ops = &null_mq_ops; |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 961 | set->nr_hw_queues = nullb ? nullb->dev->submit_queues : |
| 962 | g_submit_queues; |
| 963 | set->queue_depth = nullb ? nullb->dev->hw_queue_depth : |
| 964 | g_hw_queue_depth; |
| 965 | set->numa_node = nullb ? nullb->dev->home_node : g_home_node; |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 966 | set->cmd_size = sizeof(struct nullb_cmd); |
| 967 | set->flags = BLK_MQ_F_SHOULD_MERGE; |
| 968 | set->driver_data = NULL; |
| 969 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 970 | if (nullb->dev->blocking) |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 971 | set->flags |= BLK_MQ_F_BLOCKING; |
| 972 | |
| 973 | return blk_mq_alloc_tag_set(set); |
| 974 | } |
| 975 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 976 | static int null_add_dev(struct nullb_device *dev) |
Matias Bjørling | 9ae2d0a | 2016-09-16 14:25:05 +0200 | [diff] [blame] | 977 | { |
| 978 | struct nullb *nullb; |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 979 | int rv; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 980 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 981 | nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 982 | if (!nullb) { |
| 983 | rv = -ENOMEM; |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 984 | goto out; |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 985 | } |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 986 | nullb->dev = dev; |
| 987 | dev->nullb = nullb; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 988 | |
| 989 | spin_lock_init(&nullb->lock); |
| 990 | |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 991 | rv = setup_queues(nullb); |
| 992 | if (rv) |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 993 | goto out_free_nullb; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 994 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 995 | if (dev->queue_mode == NULL_Q_MQ) { |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 996 | if (shared_tags) { |
| 997 | nullb->tag_set = &tag_set; |
| 998 | rv = 0; |
| 999 | } else { |
| 1000 | nullb->tag_set = &nullb->__tag_set; |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1001 | rv = null_init_tag_set(nullb, nullb->tag_set); |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 1002 | } |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1003 | |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1004 | if (rv) |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 1005 | goto out_cleanup_queues; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1006 | |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 1007 | nullb->q = blk_mq_init_queue(nullb->tag_set); |
Ming Lei | 35b489d | 2015-01-02 14:25:27 +0000 | [diff] [blame] | 1008 | if (IS_ERR(nullb->q)) { |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1009 | rv = -ENOMEM; |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 1010 | goto out_cleanup_tags; |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1011 | } |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 1012 | null_init_queues(nullb); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1013 | } else if (dev->queue_mode == NULL_Q_BIO) { |
| 1014 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1015 | if (!nullb->q) { |
| 1016 | rv = -ENOMEM; |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 1017 | goto out_cleanup_queues; |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1018 | } |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1019 | blk_queue_make_request(nullb->q, null_queue_bio); |
Jan Kara | 31f9690 | 2014-10-22 15:34:21 +0200 | [diff] [blame] | 1020 | rv = init_driver_queues(nullb); |
| 1021 | if (rv) |
| 1022 | goto out_cleanup_blk_queue; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1023 | } else { |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1024 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, |
| 1025 | dev->home_node); |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1026 | if (!nullb->q) { |
| 1027 | rv = -ENOMEM; |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 1028 | goto out_cleanup_queues; |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1029 | } |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1030 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 1031 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); |
Jan Kara | 31f9690 | 2014-10-22 15:34:21 +0200 | [diff] [blame] | 1032 | rv = init_driver_queues(nullb); |
| 1033 | if (rv) |
| 1034 | goto out_cleanup_blk_queue; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1035 | } |
| 1036 | |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1037 | nullb->q->queuedata = nullb; |
| 1038 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); |
Mike Snitzer | b277da0 | 2014-10-04 10:55:32 -0600 | [diff] [blame] | 1039 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1040 | |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1041 | mutex_lock(&lock); |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1042 | nullb->index = nullb_indexes++; |
| 1043 | mutex_unlock(&lock); |
| 1044 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1045 | blk_queue_logical_block_size(nullb->q, dev->blocksize); |
| 1046 | blk_queue_physical_block_size(nullb->q, dev->blocksize); |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1047 | |
Matias Bjørling | b2b7e00 | 2015-11-12 20:25:10 +0100 | [diff] [blame] | 1048 | sprintf(nullb->disk_name, "nullb%d", nullb->index); |
| 1049 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1050 | if (dev->use_lightnvm) |
Matias Bjørling | 9ae2d0a | 2016-09-16 14:25:05 +0200 | [diff] [blame] | 1051 | rv = null_nvm_register(nullb); |
| 1052 | else |
| 1053 | rv = null_gendisk_register(nullb); |
Matias Bjørling | b2b7e00 | 2015-11-12 20:25:10 +0100 | [diff] [blame] | 1054 | |
Matias Bjørling | 9ae2d0a | 2016-09-16 14:25:05 +0200 | [diff] [blame] | 1055 | if (rv) |
| 1056 | goto out_cleanup_blk_queue; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1057 | |
Matias Bjørling | a514379 | 2016-02-11 14:49:13 +0100 | [diff] [blame] | 1058 | mutex_lock(&lock); |
| 1059 | list_add_tail(&nullb->list, &nullb_list); |
| 1060 | mutex_unlock(&lock); |
Wenwei Tao | 3681c85d | 2016-03-05 00:27:04 +0800 | [diff] [blame] | 1061 | |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1062 | return 0; |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 1063 | out_cleanup_blk_queue: |
| 1064 | blk_cleanup_queue(nullb->q); |
| 1065 | out_cleanup_tags: |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1066 | if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 1067 | blk_mq_free_tag_set(nullb->tag_set); |
Christoph Hellwig | 24d2f90 | 2014-04-15 14:14:00 -0600 | [diff] [blame] | 1068 | out_cleanup_queues: |
| 1069 | cleanup_queues(nullb); |
| 1070 | out_free_nullb: |
| 1071 | kfree(nullb); |
| 1072 | out: |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1073 | null_free_dev(dev); |
Robert Elliott | dc501dc | 2014-09-02 11:38:49 -0500 | [diff] [blame] | 1074 | return rv; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1075 | } |
| 1076 | |
| 1077 | static int __init null_init(void) |
| 1078 | { |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1079 | int ret = 0; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1080 | unsigned int i; |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1081 | struct nullb *nullb; |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1082 | struct nullb_device *dev; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1083 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1084 | if (g_bs > PAGE_SIZE) { |
Raghavendra K T | 9967d8a | 2014-01-21 16:59:59 +0530 | [diff] [blame] | 1085 | pr_warn("null_blk: invalid block size\n"); |
| 1086 | pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1087 | g_bs = PAGE_SIZE; |
Raghavendra K T | 9967d8a | 2014-01-21 16:59:59 +0530 | [diff] [blame] | 1088 | } |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1089 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1090 | if (g_use_lightnvm && g_bs != 4096) { |
Matias Bjørling | 6bb9535 | 2015-11-19 12:50:08 +0100 | [diff] [blame] | 1091 | pr_warn("null_blk: LightNVM only supports 4k block size\n"); |
| 1092 | pr_warn("null_blk: defaults block size to 4k\n"); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1093 | g_bs = 4096; |
Matias Bjørling | 6bb9535 | 2015-11-19 12:50:08 +0100 | [diff] [blame] | 1094 | } |
| 1095 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1096 | if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) { |
Matias Bjørling | b2b7e00 | 2015-11-12 20:25:10 +0100 | [diff] [blame] | 1097 | pr_warn("null_blk: LightNVM only supported for blk-mq\n"); |
| 1098 | pr_warn("null_blk: defaults queue mode to blk-mq\n"); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1099 | g_queue_mode = NULL_Q_MQ; |
Matias Bjørling | b2b7e00 | 2015-11-12 20:25:10 +0100 | [diff] [blame] | 1100 | } |
| 1101 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1102 | if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { |
| 1103 | if (g_submit_queues != nr_online_nodes) { |
weiping zhang | 558ab300 | 2017-08-03 00:26:39 +0800 | [diff] [blame] | 1104 | pr_warn("null_blk: submit_queues param is set to %u.\n", |
Matias Bjorling | d15ee6b | 2013-12-18 13:41:44 +0100 | [diff] [blame] | 1105 | nr_online_nodes); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1106 | g_submit_queues = nr_online_nodes; |
Matias Bjørling | fc1bc35 | 2013-12-21 00:11:01 +0100 | [diff] [blame] | 1107 | } |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1108 | } else if (g_submit_queues > nr_cpu_ids) |
| 1109 | g_submit_queues = nr_cpu_ids; |
| 1110 | else if (g_submit_queues <= 0) |
| 1111 | g_submit_queues = 1; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1112 | |
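	/* With shared_tags in blk-mq mode, a single tag set is allocated up front and shared by every device. */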
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1113 | if (g_queue_mode == NULL_Q_MQ && shared_tags) { |
| 1114 | ret = null_init_tag_set(NULL, &tag_set); |
Max Gurtovoy | db2d153 | 2017-07-06 18:00:07 +0300 | [diff] [blame] | 1115 | if (ret) |
| 1116 | return ret; |
| 1117 | } |
| 1118 | |
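	/*
	 * Register the configfs subsystem so additional devices can be created
	 * and configured at runtime (typically under /sys/kernel/config/nullb
	 * once configfs is mounted), in addition to the nr_devices created below.
	 */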
Shaohua Li | 3bf2bd2 | 2017-08-14 15:04:53 -0700 | [diff] [blame^] | 1119 | config_group_init(&nullb_subsys.su_group); |
| 1120 | mutex_init(&nullb_subsys.su_mutex); |
| 1121 | |
| 1122 | ret = configfs_register_subsystem(&nullb_subsys); |
| 1123 | if (ret) |
| 1124 | goto err_tagset; |
| 1125 | |
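	/*
	 * Rough sketch of the runtime interface this enables, assuming configfs
	 * is mounted at /sys/kernel/config and using the attribute names defined
	 * by the configfs code earlier in this file:
	 *
	 *   mkdir /sys/kernel/config/nullb/mydev
	 *   echo 4096 > /sys/kernel/config/nullb/mydev/blocksize
	 *   echo 1 > /sys/kernel/config/nullb/mydev/power   # instantiate /dev/nullb<N>
	 */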
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1126 | mutex_init(&lock); |
| 1127 | |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1128 | null_major = register_blkdev(0, "nullb"); |
Max Gurtovoy | db2d153 | 2017-07-06 18:00:07 +0300 | [diff] [blame] | 1129 | if (null_major < 0) { |
| 1130 | ret = null_major; |
Shaohua Li | 3bf2bd2 | 2017-08-14 15:04:53 -0700 | [diff] [blame^] | 1131 | goto err_conf; |
Max Gurtovoy | db2d153 | 2017-07-06 18:00:07 +0300 | [diff] [blame] | 1132 | } |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1133 | |
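	/*
	 * The LightNVM path backs its emulated per-command PPA (physical page
	 * address) list allocations with a kmem cache sized for 64 u64 entries.
	 */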
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1134 | if (g_use_lightnvm) { |
Matias Bjørling | 6bb9535 | 2015-11-19 12:50:08 +0100 | [diff] [blame] | 1135 | ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), |
| 1136 | 0, 0, NULL); |
| 1137 | if (!ppa_cache) { |
| 1138 | pr_err("null_blk: unable to create ppa cache\n"); |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1139 | ret = -ENOMEM; |
Matias Bjørling | 6bb9535 | 2015-11-19 12:50:08 +0100 | [diff] [blame] | 1140 | goto err_ppa; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1141 | } |
| 1142 | } |
| 1143 | |
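	/*
	 * Create the nr_devices devices requested at load time; each starts from
	 * the module-parameter defaults captured by null_alloc_dev().
	 */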
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1144 | for (i = 0; i < nr_devices; i++) { |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1145 | dev = null_alloc_dev(); |
| 1146 | 		if (!dev) { |
			ret = -ENOMEM;	/* a failed allocation must not be reported as a successful load */
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1147 | 			goto err_dev; |
		}
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1148 | ret = null_add_dev(dev); |
| 1149 | if (ret) { |
| 1150 | null_free_dev(dev); |
| 1151 | goto err_dev; |
| 1152 | } |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1153 | } |
| 1154 | |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1155 | 	pr_info("null_blk: module loaded\n"); |
| 1156 | return 0; |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1157 | |
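	/* On failure, delete any devices that were already created, then unwind the registrations in reverse order. */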
| 1158 | err_dev: |
| 1159 | while (!list_empty(&nullb_list)) { |
| 1160 | nullb = list_entry(nullb_list.next, struct nullb, list); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1161 | dev = nullb->dev; |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1162 | null_del_dev(nullb); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1163 | null_free_dev(dev); |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1164 | } |
Matias Bjørling | 6bb9535 | 2015-11-19 12:50:08 +0100 | [diff] [blame] | 1165 | kmem_cache_destroy(ppa_cache); |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1166 | err_ppa: |
| 1167 | unregister_blkdev(null_major, "nullb"); |
Shaohua Li | 3bf2bd2 | 2017-08-14 15:04:53 -0700 | [diff] [blame^] | 1168 | err_conf: |
| 1169 | configfs_unregister_subsystem(&nullb_subsys); |
Max Gurtovoy | db2d153 | 2017-07-06 18:00:07 +0300 | [diff] [blame] | 1170 | err_tagset: |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1171 | if (g_queue_mode == NULL_Q_MQ && shared_tags) |
Max Gurtovoy | db2d153 | 2017-07-06 18:00:07 +0300 | [diff] [blame] | 1172 | blk_mq_free_tag_set(&tag_set); |
Minfei Huang | af096e2 | 2015-12-08 13:47:34 -0700 | [diff] [blame] | 1173 | return ret; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1174 | } |
| 1175 | |
| 1176 | static void __exit null_exit(void) |
| 1177 | { |
| 1178 | struct nullb *nullb; |
| 1179 | |
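	/*
	 * Remove the configfs interface first so no new devices can be created
	 * while the module is unloading, then unregister the block major and
	 * delete every remaining device under the lock.
	 */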
Shaohua Li | 3bf2bd2 | 2017-08-14 15:04:53 -0700 | [diff] [blame^] | 1180 | configfs_unregister_subsystem(&nullb_subsys); |
| 1181 | |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1182 | unregister_blkdev(null_major, "nullb"); |
| 1183 | |
| 1184 | mutex_lock(&lock); |
| 1185 | while (!list_empty(&nullb_list)) { |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1186 | struct nullb_device *dev; |
| 1187 | |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1188 | nullb = list_entry(nullb_list.next, struct nullb, list); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1189 | dev = nullb->dev; |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1190 | null_del_dev(nullb); |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1191 | null_free_dev(dev); |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1192 | } |
| 1193 | mutex_unlock(&lock); |
Matias Bjørling | 6bb9535 | 2015-11-19 12:50:08 +0100 | [diff] [blame] | 1194 | |
Shaohua Li | 2984c86 | 2017-08-14 15:04:52 -0700 | [diff] [blame] | 1195 | if (g_queue_mode == NULL_Q_MQ && shared_tags) |
Jens Axboe | 82f402f | 2017-06-20 14:22:01 -0600 | [diff] [blame] | 1196 | blk_mq_free_tag_set(&tag_set); |
| 1197 | |
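	/* ppa_cache is only allocated in LightNVM mode; kmem_cache_destroy() tolerates a NULL pointer. */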
Matias Bjørling | 6bb9535 | 2015-11-19 12:50:08 +0100 | [diff] [blame] | 1198 | kmem_cache_destroy(ppa_cache); |
Jens Axboe | f2298c0 | 2013-10-25 11:52:25 +0100 | [diff] [blame] | 1199 | } |
| 1200 | |
| 1201 | module_init(null_init); |
| 1202 | module_exit(null_exit); |
| 1203 | |
| 1204 | MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>"); |
| 1205 | MODULE_LICENSE("GPL"); |