/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_SIZE		(1 << SECTOR_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
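
/*
 * Worked example (derived from the constants above): with mbps == 100,
 * each 20ms tick (NSEC_PER_SEC / TICKS_PER_SEC) refills the bandwidth
 * budget with (1 << 20) / 50 * 100 bytes, i.e. roughly 2 MB per tick.
 */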

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	call_single_data_t csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
	blk_status_t error;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;

	struct nullb_cmd *cmds;
};

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap records which sectors in the page hold data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	unsigned long bitmap;
};
#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
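
/*
 * Bit layout sketch (assuming a 64-bit unsigned long): bit 63 is LOCK,
 * bit 62 is FREE, and the low bits track which blocks of the page hold
 * data, one bit per device block.
 */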

struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;
	struct badblocks badblocks;

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool use_lightnvm; /* register as a LightNVM device */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if discard is supported */
};

struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	unsigned int queue_depth;
	atomic_long_t cur_bytes;
	struct hrtimer bw_timer;
	unsigned long cache_flush_pos;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_use_lightnvm;
module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);
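
/*
 * For example, NULLB_DEVICE_ATTR(size, ulong) below expands to
 * nullb_device_size_show()/nullb_device_size_store() wrappers around
 * nullb_device_ulong_attr_show()/nullb_device_ulong_attr_store(); stores
 * are rejected with -EBUSY once the device has been configured.
 */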

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
	}

	return count;
}
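
/*
 * Typical usage from userspace (illustrative):
 *   mkdir /sys/kernel/config/nullb/nullb0
 *   echo 1 > /sys/kernel/config/nullb/nullb0/power	# create the disk
 *   echo 0 > /sys/kernel/config/nullb/nullb0/power	# tear it down
 */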

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);
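
/*
 * The store format is "+start-end" to mark a sector range bad and
 * "-start-end" to clear it again, e.g. (illustrative):
 *   echo "+1000-1010" > /sys/kernel/config/nullb/nullb0/badblocks
 */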

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_use_lightnvm,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	badblocks_exit(&dev->badblocks);
	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->use_lightnvm = g_use_lightnvm;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}
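
/*
 * Lock-free tag allocation: find_first_zero_bit() proposes a tag and
 * test_and_set_bit_lock() claims it, retrying on contention; -1U means
 * the queue is currently full.
 */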

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, cmd->error);
		break;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	t_page->bitmap = 0;
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, &t_page->bitmap);

		if (!t_page->bitmap) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

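/*
 * null_insert_page() may drop nullb->lock to allocate the new page and
 * preload the radix tree, so the tree can change underneath the caller;
 * the slow path re-acquires the lock and revalidates with a fresh lookup.
 */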
static struct nullb_page *null_insert_page(struct nullb *nullb,
	sector_t sector, bool ignore_cache)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && t_page->bitmap == 0) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, &c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, &t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

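/*
 * Flush cache pages to the backing store until at least @n more bytes fit
 * under the configured cache_size limit. Pages already locked by another
 * flusher are skipped; if a pass makes no progress, the lock is briefly
 * dropped to give other threads a chance.
 */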
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page could unlock before using the c_pages. To
	 * avoid a race, we don't allow page free here.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * Skip any page that another thread is already flushing
		 * to disk.
		 */
		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

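/*
 * Discard drops the backing pages (and any cached copies) rather than
 * zeroing them; later reads of the range return zeroes because
 * copy_from_nullb() memsets missing pages.
 */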
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio_op(bio) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
	else {
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	unsigned long flags;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
	else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			if (dev->queue_mode == NULL_Q_RQ) {
				struct request_queue *q = nullb->q;

				spin_lock_irq(q->queue_lock);
				rq->rq_flags |= RQF_DONTPREP;
				blk_requeue_request(q, rq);
				spin_unlock_irq(q->queue_lock);
				return BLK_STS_OK;
			} else
				/* requeue request */
				return BLK_STS_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);
out:
	/* Complete IO inline, via softirq, or via timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

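/*
 * Bandwidth throttling: cur_bytes is a per-tick byte budget.
 * null_handle_cmd() charges each request against it and stops the queue
 * once the budget goes negative; this timer refills the budget every
 * TIMER_INTERVAL (20ms at 50 ticks per second) and restarts the queue.
 */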
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

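/*
 * Spread submitting CPUs across the available queues: each queue serves a
 * contiguous block of roughly nr_cpu_ids / nr_queues CPUs.
 */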
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
	rqd->error = status ? -EIO : 0;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	struct nullb *nullb = dev->q->queuedata;
	sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, nullb->dev->blocksize); /* convert size to pages */
	size >>= 8; /* convert size to pages per block */
Matias Bjørling19bd6fe2017-01-31 13:17:15 +01001479 grp = &id->grp;
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001480 grp->mtype = 0;
Matias Bjørling5b40db92015-11-19 12:50:09 +01001481 grp->fmtype = 0;
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001482 grp->num_ch = 1;
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001483 grp->num_pg = 256;
Matias Bjørling5b40db92015-11-19 12:50:09 +01001484 blksize = size;
Arnd Bergmanne93d12a2016-01-13 23:04:08 +01001485 size >>= 16;
Matias Bjørling5b40db92015-11-19 12:50:09 +01001486 grp->num_lun = size + 1;
Arnd Bergmanne93d12a2016-01-13 23:04:08 +01001487 sector_div(blksize, grp->num_lun);
Matias Bjørling5b40db92015-11-19 12:50:09 +01001488 grp->num_blk = blksize;
1489 grp->num_pln = 1;
1490
Shaohua Li2984c862017-08-14 15:04:52 -07001491 grp->fpg_sz = nullb->dev->blocksize;
1492 grp->csecs = nullb->dev->blocksize;
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001493 grp->trdt = 25000;
1494 grp->trdm = 25000;
1495 grp->tprt = 500000;
1496 grp->tprm = 500000;
1497 grp->tbet = 1500000;
1498 grp->tbem = 1500000;
1499 grp->mpos = 0x010101; /* single plane rwe */
Shaohua Li2984c862017-08-14 15:04:52 -07001500 grp->cpar = nullb->dev->hw_queue_depth;
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001501
1502 return 0;
1503}
1504
Matias Bjørling16f26c32015-12-06 11:25:48 +01001505static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001506{
1507 mempool_t *virtmem_pool;
1508
Matias Bjørling6bb95352015-11-19 12:50:08 +01001509 virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001510 if (!virtmem_pool) {
1511 pr_err("null_blk: Unable to create virtual memory pool\n");
1512 return NULL;
1513 }
1514
1515 return virtmem_pool;
1516}
1517
1518static void null_lnvm_destroy_dma_pool(void *pool)
1519{
1520 mempool_destroy(pool);
1521}
1522
Matias Bjørling16f26c32015-12-06 11:25:48 +01001523static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001524 gfp_t mem_flags, dma_addr_t *dma_handler)
1525{
1526 return mempool_alloc(pool, mem_flags);
1527}
1528
1529static void null_lnvm_dev_dma_free(void *pool, void *entry,
1530 dma_addr_t dma_handler)
1531{
1532 mempool_free(entry, pool);
1533}
1534
1535static struct nvm_dev_ops null_lnvm_dev_ops = {
1536 .identity = null_lnvm_id,
1537 .submit_io = null_lnvm_submit_io,
1538
1539 .create_dma_pool = null_lnvm_create_dma_pool,
1540 .destroy_dma_pool = null_lnvm_destroy_dma_pool,
1541 .dev_dma_alloc = null_lnvm_dev_dma_alloc,
1542 .dev_dma_free = null_lnvm_dev_dma_free,
1543
1544	/* Simulate the NVMe protocol's limit on physical sectors per command */
1545 .max_phys_sect = 64,
1546};
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001547
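/*
 * Register the nullb with the lightnvm subsystem instead of exposing a
 * gendisk; the nvm_dev borrows the nullb's request queue and disk name.
 */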
1548static int null_nvm_register(struct nullb *nullb)
1549{
Matias Bjørlingb0b4e092016-09-16 14:25:07 +02001550 struct nvm_dev *dev;
1551 int rv;
1552
1553 dev = nvm_alloc_dev(0);
1554 if (!dev)
1555 return -ENOMEM;
1556
1557 dev->q = nullb->q;
1558 memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
1559 dev->ops = &null_lnvm_dev_ops;
1560
1561 rv = nvm_register(dev);
1562 if (rv) {
1563 kfree(dev);
1564 return rv;
1565 }
1566 nullb->ndev = dev;
1567 return 0;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001568}
1569
1570static void null_nvm_unregister(struct nullb *nullb)
1571{
Matias Bjørlingb0b4e092016-09-16 14:25:07 +02001572 nvm_unregister(nullb->ndev);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001573}
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001574#else
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001575static int null_nvm_register(struct nullb *nullb)
1576{
Yasuaki Ishimatsu92153d32016-11-16 08:26:11 -07001577 pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001578 return -EINVAL;
1579}
1580static void null_nvm_unregister(struct nullb *nullb) {}
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001581#endif /* CONFIG_NVM */
1582
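/*
 * Tear down in roughly the reverse order of null_add_dev(): release the
 * index, unlink the device, unregister the disk or lightnvm target, drain
 * any bandwidth throttling, then free the queue, the private tag set, the
 * per-queue structures, and any pages held by a write-back cache.
 */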
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001583static void null_del_dev(struct nullb *nullb)
1584{
Shaohua Li2984c862017-08-14 15:04:52 -07001585 struct nullb_device *dev = nullb->dev;
1586
Shaohua Li94bc02e2017-08-14 15:04:55 -07001587 ida_simple_remove(&nullb_indexes, nullb->index);
1588
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001589 list_del_init(&nullb->list);
1590
Shaohua Li2984c862017-08-14 15:04:52 -07001591 if (dev->use_lightnvm)
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001592 null_nvm_unregister(nullb);
1593 else
1594 del_gendisk(nullb->disk);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001595
1596 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1597 hrtimer_cancel(&nullb->bw_timer);
1598 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1599 null_restart_queue_async(nullb);
1600 }
1601
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001602 blk_cleanup_queue(nullb->q);
Shaohua Li2984c862017-08-14 15:04:52 -07001603 if (dev->queue_mode == NULL_Q_MQ &&
1604 nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001605 blk_mq_free_tag_set(nullb->tag_set);
Shaohua Li2984c862017-08-14 15:04:52 -07001606 if (!dev->use_lightnvm)
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001607 put_disk(nullb->disk);
1608 cleanup_queues(nullb);
Shaohua Lideb78b42017-08-14 15:04:59 -07001609 if (null_cache_active(nullb))
1610 null_free_device_storage(nullb->dev, true);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001611 kfree(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001612 dev->nullb = NULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001613}
1614
Shaohua Li306eb6b2017-08-14 15:04:57 -07001615static void null_config_discard(struct nullb *nullb)
1616{
1617	if (!nullb->dev->discard)
1618 return;
1619 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1620 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1621 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
1622 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
Jens Axboef2298c02013-10-25 11:52:25 +01001623}
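/*
 * A minimal sketch of exercising the discard setup above from userspace
 * once a device is up (device name hypothetical):
 *
 *	# cat /sys/block/nullb0/queue/discard_granularity
 *	512
 *	# blkdiscard /dev/nullb0
 *
 * The granularity mirrors the configured blocksize (512 bytes by default).
 */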
1624
1625static int null_open(struct block_device *bdev, fmode_t mode)
1626{
1627 return 0;
1628}
1629
1630static void null_release(struct gendisk *disk, fmode_t mode)
1631{
1632}
1633
1634static const struct block_device_operations null_fops = {
1635 .owner = THIS_MODULE,
1636 .open = null_open,
1637 .release = null_release,
1638};
1639
Jens Axboe82f402f2017-06-20 14:22:01 -06001640static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1641{
1642 BUG_ON(!nullb);
1643 BUG_ON(!nq);
1644
1645 init_waitqueue_head(&nq->wait);
1646 nq->queue_depth = nullb->queue_depth;
Shaohua Li2984c862017-08-14 15:04:52 -07001647 nq->dev = nullb->dev;
Jens Axboe82f402f2017-06-20 14:22:01 -06001648}
1649
1650static void null_init_queues(struct nullb *nullb)
1651{
1652 struct request_queue *q = nullb->q;
1653 struct blk_mq_hw_ctx *hctx;
1654 struct nullb_queue *nq;
1655 int i;
1656
1657 queue_for_each_hw_ctx(q, hctx, i) {
1658 if (!hctx->nr_ctx || !hctx->tags)
1659 continue;
1660 nq = &nullb->queues[i];
1661 hctx->driver_data = nq;
1662 null_init_queue(nullb, nq);
1663 nullb->nr_queues++;
1664 }
1665}
1666
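/*
 * Pre-allocate one nullb_cmd per queue slot plus a bitmap used to hand out
 * tags in the bio and rq modes. tag_map holds queue_depth bits rounded up
 * to whole longs; e.g. a depth of 64 needs one unsigned long on 64-bit.
 */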
Jens Axboef2298c02013-10-25 11:52:25 +01001667static int setup_commands(struct nullb_queue *nq)
1668{
1669 struct nullb_cmd *cmd;
1670 int i, tag_size;
1671
1672	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
1673 if (!nq->cmds)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001674 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001675
1676 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
1677	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
1678 if (!nq->tag_map) {
1679 kfree(nq->cmds);
Matias Bjorling2d263a782013-12-18 13:41:43 +01001680 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001681 }
1682
1683 for (i = 0; i < nq->queue_depth; i++) {
1684 cmd = &nq->cmds[i];
1685 INIT_LIST_HEAD(&cmd->list);
1686 cmd->ll_list.next = NULL;
1687 cmd->tag = -1U;
1688 }
1689
1690 return 0;
1691}
1692
Jens Axboef2298c02013-10-25 11:52:25 +01001693static int setup_queues(struct nullb *nullb)
1694{
Shaohua Li2984c862017-08-14 15:04:52 -07001695	nullb->queues = kcalloc(nullb->dev->submit_queues,
1696			sizeof(struct nullb_queue), GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001697 if (!nullb->queues)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001698 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001699
1700 nullb->nr_queues = 0;
Shaohua Li2984c862017-08-14 15:04:52 -07001701 nullb->queue_depth = nullb->dev->hw_queue_depth;
Jens Axboef2298c02013-10-25 11:52:25 +01001702
Matias Bjorling2d263a782013-12-18 13:41:43 +01001703 return 0;
1704}
1705
1706static int init_driver_queues(struct nullb *nullb)
1707{
1708 struct nullb_queue *nq;
1709 int i, ret = 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001710
Shaohua Li2984c862017-08-14 15:04:52 -07001711 for (i = 0; i < nullb->dev->submit_queues; i++) {
Jens Axboef2298c02013-10-25 11:52:25 +01001712 nq = &nullb->queues[i];
Matias Bjorling2d263a782013-12-18 13:41:43 +01001713
1714 null_init_queue(nullb, nq);
1715
1716 ret = setup_commands(nq);
1717 if (ret)
Jan Kara31f96902014-10-22 15:34:21 +02001718 return ret;
Jens Axboef2298c02013-10-25 11:52:25 +01001719 nullb->nr_queues++;
1720 }
Matias Bjorling2d263a782013-12-18 13:41:43 +01001721 return 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001722}
1723
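/*
 * Expose the nullb as a regular disk. dev->size is in megabytes; the
 * arithmetic below converts it to bytes and then to 512-byte sectors for
 * set_capacity().
 */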
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001724static int null_gendisk_register(struct nullb *nullb)
Jens Axboef2298c02013-10-25 11:52:25 +01001725{
1726 struct gendisk *disk;
Jens Axboef2298c02013-10-25 11:52:25 +01001727 sector_t size;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001728
Shaohua Li2984c862017-08-14 15:04:52 -07001729 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001730 if (!disk)
1731 return -ENOMEM;
Shaohua Li2984c862017-08-14 15:04:52 -07001732 size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001733 set_capacity(disk, size >> 9);
1734
1735 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
1736 disk->major = null_major;
1737 disk->first_minor = nullb->index;
1738 disk->fops = &null_fops;
1739 disk->private_data = nullb;
1740 disk->queue = nullb->q;
1741 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1742
1743 add_disk(disk);
1744 return 0;
1745}
1746
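/*
 * Size a tag set either for one device or, when nullb is NULL, for the
 * module-wide shared set driven by the g_* module parameters.
 */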
Shaohua Li2984c862017-08-14 15:04:52 -07001747static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001748{
1749 set->ops = &null_mq_ops;
Shaohua Li2984c862017-08-14 15:04:52 -07001750 set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
1751 g_submit_queues;
1752 set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
1753 g_hw_queue_depth;
1754 set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
Jens Axboe82f402f2017-06-20 14:22:01 -06001755 set->cmd_size = sizeof(struct nullb_cmd);
1756 set->flags = BLK_MQ_F_SHOULD_MERGE;
1757 set->driver_data = NULL;
1758
Shaohua Li0d06a422017-08-25 13:46:25 -07001759 if ((nullb && nullb->dev->blocking) || g_blocking)
Jens Axboe82f402f2017-06-20 14:22:01 -06001760 set->flags |= BLK_MQ_F_BLOCKING;
1761
1762 return blk_mq_alloc_tag_set(set);
1763}
1764
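/*
 * Clamp user-supplied settings to values the driver can service: blocksize
 * becomes a 512-byte multiple within [512, 4096], lightnvm forces 4k
 * blk-mq, submit_queues is bounded by the node/CPU counts, and throttling
 * or caching is dropped where it cannot work.
 */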
Shaohua Licedcafa2017-08-14 15:04:54 -07001765static void null_validate_conf(struct nullb_device *dev)
1766{
1767 dev->blocksize = round_down(dev->blocksize, 512);
1768 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
1769 if (dev->use_lightnvm && dev->blocksize != 4096)
1770 dev->blocksize = 4096;
1771
1772 if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
1773 dev->queue_mode = NULL_Q_MQ;
1774
1775 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
1776 if (dev->submit_queues != nr_online_nodes)
1777 dev->submit_queues = nr_online_nodes;
1778 } else if (dev->submit_queues > nr_cpu_ids)
1779 dev->submit_queues = nr_cpu_ids;
1780 else if (dev->submit_queues == 0)
1781 dev->submit_queues = 1;
1782
1783 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
1784 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
Shaohua Li5bcd0e02017-08-14 15:04:56 -07001785
1786	/* Memory backing allocates pages in the I/O path, so allow blocking */
1787 if (dev->memory_backed)
1788 dev->blocking = true;
Shaohua Lideb78b42017-08-14 15:04:59 -07001789	else /* cache is meaningless without memory backing */
1790 dev->cache_size = 0;
1791 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
1792 dev->cache_size);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001793 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
1794	/* a bio-based queue cannot be stopped, so it cannot be throttled */
1795 if (dev->queue_mode == NULL_Q_BIO)
1796 dev->mbps = 0;
Shaohua Licedcafa2017-08-14 15:04:54 -07001797}
1798
Shaohua Li2984c862017-08-14 15:04:52 -07001799static int null_add_dev(struct nullb_device *dev)
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001800{
1801 struct nullb *nullb;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001802 int rv;
Jens Axboef2298c02013-10-25 11:52:25 +01001803
Shaohua Licedcafa2017-08-14 15:04:54 -07001804 null_validate_conf(dev);
1805
Shaohua Li2984c862017-08-14 15:04:52 -07001806 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001807 if (!nullb) {
1808 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001809 goto out;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001810 }
Shaohua Li2984c862017-08-14 15:04:52 -07001811 nullb->dev = dev;
1812 dev->nullb = nullb;
Jens Axboef2298c02013-10-25 11:52:25 +01001813
1814 spin_lock_init(&nullb->lock);
1815
Robert Elliottdc501dc2014-09-02 11:38:49 -05001816 rv = setup_queues(nullb);
1817 if (rv)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001818 goto out_free_nullb;
Jens Axboef2298c02013-10-25 11:52:25 +01001819
Shaohua Li2984c862017-08-14 15:04:52 -07001820 if (dev->queue_mode == NULL_Q_MQ) {
Jens Axboe82f402f2017-06-20 14:22:01 -06001821 if (shared_tags) {
1822 nullb->tag_set = &tag_set;
1823 rv = 0;
1824 } else {
1825 nullb->tag_set = &nullb->__tag_set;
Shaohua Li2984c862017-08-14 15:04:52 -07001826 rv = null_init_tag_set(nullb, nullb->tag_set);
Jens Axboe82f402f2017-06-20 14:22:01 -06001827 }
Jens Axboef2298c02013-10-25 11:52:25 +01001828
Robert Elliottdc501dc2014-09-02 11:38:49 -05001829 if (rv)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001830 goto out_cleanup_queues;
Jens Axboef2298c02013-10-25 11:52:25 +01001831
Jens Axboe82f402f2017-06-20 14:22:01 -06001832 nullb->q = blk_mq_init_queue(nullb->tag_set);
Ming Lei35b489d2015-01-02 14:25:27 +00001833 if (IS_ERR(nullb->q)) {
Robert Elliottdc501dc2014-09-02 11:38:49 -05001834 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001835 goto out_cleanup_tags;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001836 }
Jens Axboe82f402f2017-06-20 14:22:01 -06001837 null_init_queues(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001838 } else if (dev->queue_mode == NULL_Q_BIO) {
1839 nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001840 if (!nullb->q) {
1841 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001842 goto out_cleanup_queues;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001843 }
Jens Axboef2298c02013-10-25 11:52:25 +01001844 blk_queue_make_request(nullb->q, null_queue_bio);
Jan Kara31f96902014-10-22 15:34:21 +02001845 rv = init_driver_queues(nullb);
1846 if (rv)
1847 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001848 } else {
Shaohua Li2984c862017-08-14 15:04:52 -07001849 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
1850 dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001851 if (!nullb->q) {
1852 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001853 goto out_cleanup_queues;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001854 }
Jens Axboef2298c02013-10-25 11:52:25 +01001855 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001856 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
Jan Kara31f96902014-10-22 15:34:21 +02001857 rv = init_driver_queues(nullb);
1858 if (rv)
1859 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001860 }
1861
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001862 if (dev->mbps) {
1863 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
1864 nullb_setup_bwtimer(nullb);
1865 }
1866
Shaohua Lideb78b42017-08-14 15:04:59 -07001867 if (dev->cache_size > 0) {
1868 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
1869 blk_queue_write_cache(nullb->q, true, true);
1870 blk_queue_flush_queueable(nullb->q, true);
1871 }
1872
Jens Axboef2298c02013-10-25 11:52:25 +01001873 nullb->q->queuedata = nullb;
1874 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
Mike Snitzerb277da02014-10-04 10:55:32 -06001875 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
Jens Axboef2298c02013-10-25 11:52:25 +01001876
Jens Axboef2298c02013-10-25 11:52:25 +01001877 mutex_lock(&lock);
Shaohua Li94bc02e2017-08-14 15:04:55 -07001878 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
Shaohua Licedcafa2017-08-14 15:04:54 -07001879 dev->index = nullb->index;
Jens Axboef2298c02013-10-25 11:52:25 +01001880 mutex_unlock(&lock);
1881
Shaohua Li2984c862017-08-14 15:04:52 -07001882 blk_queue_logical_block_size(nullb->q, dev->blocksize);
1883 blk_queue_physical_block_size(nullb->q, dev->blocksize);
Jens Axboef2298c02013-10-25 11:52:25 +01001884
Shaohua Li306eb6b2017-08-14 15:04:57 -07001885 null_config_discard(nullb);
Jens Axboef2298c02013-10-25 11:52:25 +01001886
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001887 sprintf(nullb->disk_name, "nullb%d", nullb->index);
1888
Shaohua Li2984c862017-08-14 15:04:52 -07001889 if (dev->use_lightnvm)
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001890 rv = null_nvm_register(nullb);
1891 else
1892 rv = null_gendisk_register(nullb);
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001893
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001894 if (rv)
1895 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001896
Matias Bjørlinga5143792016-02-11 14:49:13 +01001897 mutex_lock(&lock);
1898 list_add_tail(&nullb->list, &nullb_list);
1899 mutex_unlock(&lock);
Wenwei Tao3681c852016-03-05 00:27:04 +08001900
Jens Axboef2298c02013-10-25 11:52:25 +01001901 return 0;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001902out_cleanup_blk_queue:
1903 blk_cleanup_queue(nullb->q);
1904out_cleanup_tags:
Shaohua Li2984c862017-08-14 15:04:52 -07001905 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001906 blk_mq_free_tag_set(nullb->tag_set);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001907out_cleanup_queues:
1908 cleanup_queues(nullb);
1909out_free_nullb:
1910 kfree(nullb);
1911out:
Robert Elliottdc501dc2014-09-02 11:38:49 -05001912 return rv;
Jens Axboef2298c02013-10-25 11:52:25 +01001913}
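/*
 * Besides the nr_devices instances created at module load, devices can be
 * created at runtime through the configfs subsystem registered in
 * null_init(). A sketch, assuming the attribute names defined earlier in
 * this file (size is in megabytes):
 *
 *	# mkdir /sys/kernel/config/nullb/nullb1
 *	# echo 4096 > /sys/kernel/config/nullb/nullb1/size
 *	# echo 1 > /sys/kernel/config/nullb/nullb1/memory_backed
 *	# echo 1 > /sys/kernel/config/nullb/nullb1/power
 *
 * Writing 1 to "power" invokes null_add_dev(); writing 0 tears the device
 * down again.
 */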
1914
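/*
 * Module init: validate the global parameters, set up the shared tag set
 * if requested, register the configfs subsystem and the block major, then
 * create nr_devices devices. Each error label unwinds exactly what was set
 * up before it, in reverse order.
 */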
1915static int __init null_init(void)
1916{
Minfei Huangaf096e22015-12-08 13:47:34 -07001917 int ret = 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001918 unsigned int i;
Minfei Huangaf096e22015-12-08 13:47:34 -07001919 struct nullb *nullb;
Shaohua Li2984c862017-08-14 15:04:52 -07001920 struct nullb_device *dev;
Jens Axboef2298c02013-10-25 11:52:25 +01001921
Shaohua Lideb78b42017-08-14 15:04:59 -07001922	/* nullb_page.bitmap must hold PAGE_SECTORS bits plus the LOCK/FREE flag bits */
1923	if (BITS_PER_LONG - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
1924 return -EINVAL;
1925
Shaohua Li2984c862017-08-14 15:04:52 -07001926 if (g_bs > PAGE_SIZE) {
Raghavendra K T9967d8a2014-01-21 16:59:59 +05301927 pr_warn("null_blk: invalid block size\n");
1928		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
Shaohua Li2984c862017-08-14 15:04:52 -07001929 g_bs = PAGE_SIZE;
Raghavendra K T9967d8a2014-01-21 16:59:59 +05301930 }
Jens Axboef2298c02013-10-25 11:52:25 +01001931
Shaohua Li2984c862017-08-14 15:04:52 -07001932 if (g_use_lightnvm && g_bs != 4096) {
Matias Bjørling6bb95352015-11-19 12:50:08 +01001933 pr_warn("null_blk: LightNVM only supports 4k block size\n");
1934		pr_warn("null_blk: defaulting block size to 4k\n");
Shaohua Li2984c862017-08-14 15:04:52 -07001935 g_bs = 4096;
Matias Bjørling6bb95352015-11-19 12:50:08 +01001936 }
1937
Shaohua Li2984c862017-08-14 15:04:52 -07001938 if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001939 pr_warn("null_blk: LightNVM only supported for blk-mq\n");
1940		pr_warn("null_blk: defaulting queue mode to blk-mq\n");
Shaohua Li2984c862017-08-14 15:04:52 -07001941 g_queue_mode = NULL_Q_MQ;
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001942 }
1943
Shaohua Li2984c862017-08-14 15:04:52 -07001944 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
1945 if (g_submit_queues != nr_online_nodes) {
weiping zhang558ab3002017-08-03 00:26:39 +08001946 pr_warn("null_blk: submit_queues param is set to %u.\n",
Matias Bjorlingd15ee6b2013-12-18 13:41:44 +01001947 nr_online_nodes);
Shaohua Li2984c862017-08-14 15:04:52 -07001948 g_submit_queues = nr_online_nodes;
Matias Bjørlingfc1bc352013-12-21 00:11:01 +01001949 }
Shaohua Li2984c862017-08-14 15:04:52 -07001950 } else if (g_submit_queues > nr_cpu_ids)
1951 g_submit_queues = nr_cpu_ids;
1952 else if (g_submit_queues <= 0)
1953 g_submit_queues = 1;
Jens Axboef2298c02013-10-25 11:52:25 +01001954
Shaohua Li2984c862017-08-14 15:04:52 -07001955 if (g_queue_mode == NULL_Q_MQ && shared_tags) {
1956 ret = null_init_tag_set(NULL, &tag_set);
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001957 if (ret)
1958 return ret;
1959 }
1960
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001961 config_group_init(&nullb_subsys.su_group);
1962 mutex_init(&nullb_subsys.su_mutex);
1963
1964 ret = configfs_register_subsystem(&nullb_subsys);
1965 if (ret)
1966 goto err_tagset;
1967
Jens Axboef2298c02013-10-25 11:52:25 +01001968 mutex_init(&lock);
1969
Jens Axboef2298c02013-10-25 11:52:25 +01001970 null_major = register_blkdev(0, "nullb");
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001971 if (null_major < 0) {
1972 ret = null_major;
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001973 goto err_conf;
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001974 }
Jens Axboef2298c02013-10-25 11:52:25 +01001975
Shaohua Li2984c862017-08-14 15:04:52 -07001976 if (g_use_lightnvm) {
Matias Bjørling6bb95352015-11-19 12:50:08 +01001977 ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
1978 0, 0, NULL);
1979 if (!ppa_cache) {
1980 pr_err("null_blk: unable to create ppa cache\n");
Minfei Huangaf096e22015-12-08 13:47:34 -07001981 ret = -ENOMEM;
Matias Bjørling6bb95352015-11-19 12:50:08 +01001982 goto err_ppa;
Jens Axboef2298c02013-10-25 11:52:25 +01001983 }
1984 }
1985
Minfei Huangaf096e22015-12-08 13:47:34 -07001986 for (i = 0; i < nr_devices; i++) {
Shaohua Li2984c862017-08-14 15:04:52 -07001987 dev = null_alloc_dev();
1988		if (!dev) {
			ret = -ENOMEM;
Minfei Huangaf096e22015-12-08 13:47:34 -07001989			goto err_dev;
		}
Shaohua Li2984c862017-08-14 15:04:52 -07001990 ret = null_add_dev(dev);
1991 if (ret) {
1992 null_free_dev(dev);
1993 goto err_dev;
1994 }
Minfei Huangaf096e22015-12-08 13:47:34 -07001995 }
1996
Jens Axboef2298c02013-10-25 11:52:25 +01001997	pr_info("null_blk: module loaded\n");
1998 return 0;
Minfei Huangaf096e22015-12-08 13:47:34 -07001999
2000err_dev:
2001 while (!list_empty(&nullb_list)) {
2002 nullb = list_entry(nullb_list.next, struct nullb, list);
Shaohua Li2984c862017-08-14 15:04:52 -07002003 dev = nullb->dev;
Minfei Huangaf096e22015-12-08 13:47:34 -07002004 null_del_dev(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07002005 null_free_dev(dev);
Minfei Huangaf096e22015-12-08 13:47:34 -07002006 }
Matias Bjørling6bb95352015-11-19 12:50:08 +01002007 kmem_cache_destroy(ppa_cache);
Minfei Huangaf096e22015-12-08 13:47:34 -07002008err_ppa:
2009 unregister_blkdev(null_major, "nullb");
Shaohua Li3bf2bd22017-08-14 15:04:53 -07002010err_conf:
2011 configfs_unregister_subsystem(&nullb_subsys);
Max Gurtovoydb2d1532017-07-06 18:00:07 +03002012err_tagset:
Shaohua Li2984c862017-08-14 15:04:52 -07002013 if (g_queue_mode == NULL_Q_MQ && shared_tags)
Max Gurtovoydb2d1532017-07-06 18:00:07 +03002014 blk_mq_free_tag_set(&tag_set);
Minfei Huangaf096e22015-12-08 13:47:34 -07002015 return ret;
Jens Axboef2298c02013-10-25 11:52:25 +01002016}
2017
2018static void __exit null_exit(void)
2019{
2020 struct nullb *nullb;
2021
Shaohua Li3bf2bd22017-08-14 15:04:53 -07002022 configfs_unregister_subsystem(&nullb_subsys);
2023
Jens Axboef2298c02013-10-25 11:52:25 +01002024 unregister_blkdev(null_major, "nullb");
2025
2026 mutex_lock(&lock);
2027 while (!list_empty(&nullb_list)) {
Shaohua Li2984c862017-08-14 15:04:52 -07002028 struct nullb_device *dev;
2029
Jens Axboef2298c02013-10-25 11:52:25 +01002030 nullb = list_entry(nullb_list.next, struct nullb, list);
Shaohua Li2984c862017-08-14 15:04:52 -07002031 dev = nullb->dev;
Jens Axboef2298c02013-10-25 11:52:25 +01002032 null_del_dev(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07002033 null_free_dev(dev);
Jens Axboef2298c02013-10-25 11:52:25 +01002034 }
2035 mutex_unlock(&lock);
Matias Bjørling6bb95352015-11-19 12:50:08 +01002036
Shaohua Li2984c862017-08-14 15:04:52 -07002037 if (g_queue_mode == NULL_Q_MQ && shared_tags)
Jens Axboe82f402f2017-06-20 14:22:01 -06002038 blk_mq_free_tag_set(&tag_set);
2039
Matias Bjørling6bb95352015-11-19 12:50:08 +01002040 kmem_cache_destroy(ppa_cache);
Jens Axboef2298c02013-10-25 11:52:25 +01002041}
2042
2043module_init(null_init);
2044module_exit(null_exit);
2045
Jens Axboe231b3db2017-08-25 12:53:15 -06002046MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
Jens Axboef2298c02013-10-25 11:52:25 +01002047MODULE_LICENSE("GPL");