/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_SIZE		(1 << SECTOR_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

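/*
 * Bytes the bandwidth throttle allows per timer tick; nullb_bwtimer_fn()
 * refills cur_bytes with this amount every TIMER_INTERVAL. For example,
 * mbps = 100 allows roughly 2 MiB per 20 ms tick.
 */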
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	call_single_data_t csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
	blk_status_t error;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;

	struct nullb_cmd *cmds;
};

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	unsigned long bitmap;
};
#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)

struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;
	struct badblocks badblocks;

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if discard is supported */
};

struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	unsigned int queue_depth;
	atomic_long_t cur_bytes;
	struct hrtimer bw_timer;
	unsigned long cache_flush_pos;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, S_IRUGO);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

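/*
 * Illustrative module load using the parameters above: one multiqueue
 * device, 250 GB, 64-deep hardware queues, softirq completions (these
 * happen to be the defaults):
 *
 *   modprobe null_blk queue_mode=2 gb=250 hw_queue_depth=64 irqmode=1
 */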
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);

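/*
 * Each attribute declared below becomes a file in the device's configfs
 * directory (typically /sys/kernel/config/nullb/<name>/); stores are
 * rejected with -EBUSY once the device has been configured.
 */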
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);

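/*
 * Writing 1 to "power" creates and starts the disk; writing 0 tears it
 * down again. A minimal configfs session (illustrative):
 *
 *   modprobe null_blk nr_devices=0
 *   mkdir /sys/kernel/config/nullb/nullb0
 *   echo 1 > /sys/kernel/config/nullb/nullb0/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/nullb0/power
 */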
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

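/*
 * Accepted store format: "+start-end" marks the inclusive sector range
 * bad, "-start-end" clears it again, e.g. (illustrative):
 *
 *   echo "+0-1023" > /sys/kernel/config/nullb/nullb0/badblocks
 */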
static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, cmd->error);
		break;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	t_page->bitmap = 0;
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, &t_page->bitmap);

		if (!t_page->bitmap) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

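/*
 * Look up or create the backing page for @sector. The page allocation
 * may sleep, so nullb->lock is dropped and re-taken around it;
 * null_radix_tree_insert() resolves the case where another thread
 * inserted the same page in the meantime.
 */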
static struct nullb_page *null_insert_page(struct nullb *nullb,
	sector_t sector, bool ignore_cache)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && t_page->bitmap == 0) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, &c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, &t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

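/*
 * Write back cached pages until at least @n bytes of cache space are
 * available (or the cache is empty). Pages already locked by another
 * flusher are skipped; see the NULLB_PAGE_LOCK/FREE bits above.
 */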
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid the race, don't allow the pages to be freed meanwhile.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found a page which is being flushed to disk by another
		 * thread, so skip it.
		 */
		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

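/*
 * Copy @n bytes from @source into the backing store, block by block.
 * Writes land in the cache tree unless the cache is off or the write is
 * FUA, in which case they go straight to the data tree.
 */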
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio_op(bio) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
	else {
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	unsigned long flags;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
	else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

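/*
 * Main per-command path: charge the bandwidth throttle (requeueing once
 * the budget is exhausted), fail I/O that overlaps a configured bad
 * block range, move data for memory-backed devices, then complete
 * inline, via softirq, or via hrtimer according to dev->irqmode.
 */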
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			if (dev->queue_mode == NULL_Q_RQ) {
				struct request_queue *q = nullb->q;

				spin_lock_irq(q->queue_lock);
				rq->rq_flags |= RQF_DONTPREP;
				blk_requeue_request(q, rq);
				spin_unlock_irq(q->queue_lock);
				return BLK_STS_OK;
			} else
				/* requeue request */
				return BLK_STS_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);
out:
	/* Complete IO inline, via softirq, or via timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

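/* Map the submitting CPU to one of the device's submission queues. */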
1321static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
1322{
1323 int index = 0;
1324
1325 if (nullb->nr_queues != 1)
1326 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
1327
1328 return &nullb->queues[index];
1329}
1330
Jens Axboedece1632015-11-05 10:41:16 -07001331static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
Jens Axboef2298c02013-10-25 11:52:25 +01001332{
1333 struct nullb *nullb = q->queuedata;
1334 struct nullb_queue *nq = nullb_to_queue(nullb);
1335 struct nullb_cmd *cmd;
1336
1337 cmd = alloc_cmd(nq, 1);
1338 cmd->bio = bio;
1339
1340 null_handle_cmd(cmd);
Jens Axboedece1632015-11-05 10:41:16 -07001341 return BLK_QC_T_NONE;
Jens Axboef2298c02013-10-25 11:52:25 +01001342}
1343
1344static int null_rq_prep_fn(struct request_queue *q, struct request *req)
1345{
1346 struct nullb *nullb = q->queuedata;
1347 struct nullb_queue *nq = nullb_to_queue(nullb);
1348 struct nullb_cmd *cmd;
1349
1350 cmd = alloc_cmd(nq, 0);
1351 if (cmd) {
1352 cmd->rq = req;
1353 req->special = cmd;
1354 return BLKPREP_OK;
1355 }
Akinobu Mita8b70f452015-06-02 08:35:10 +09001356 blk_stop_queue(q);
Jens Axboef2298c02013-10-25 11:52:25 +01001357
1358 return BLKPREP_DEFER;
1359}
1360
1361static void null_request_fn(struct request_queue *q)
1362{
1363 struct request *rq;
1364
1365 while ((rq = blk_fetch_request(q)) != NULL) {
1366 struct nullb_cmd *cmd = rq->special;
1367
1368 spin_unlock_irq(q->queue_lock);
1369 null_handle_cmd(cmd);
1370 spin_lock_irq(q->queue_lock);
1371 }
1372}
1373
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001374static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
Jens Axboe74c45052014-10-29 11:14:52 -06001375 const struct blk_mq_queue_data *bd)
Jens Axboef2298c02013-10-25 11:52:25 +01001376{
Jens Axboe74c45052014-10-29 11:14:52 -06001377 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
Shaohua Li2984c862017-08-14 15:04:52 -07001378 struct nullb_queue *nq = hctx->driver_data;
Jens Axboef2298c02013-10-25 11:52:25 +01001379
Jens Axboedb5bcf82017-03-30 13:44:26 -06001380 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1381
Shaohua Li2984c862017-08-14 15:04:52 -07001382 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
Paolo Valente3c395a92015-12-01 11:48:17 +01001383 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1384 cmd->timer.function = null_cmd_timer_expired;
1385 }
Jens Axboe74c45052014-10-29 11:14:52 -06001386 cmd->rq = bd->rq;
Shaohua Li2984c862017-08-14 15:04:52 -07001387 cmd->nq = nq;
Jens Axboef2298c02013-10-25 11:52:25 +01001388
Jens Axboe74c45052014-10-29 11:14:52 -06001389 blk_mq_start_request(bd->rq);
Christoph Hellwige2490072014-09-13 16:40:09 -07001390
Shaohua Li5bcd0e02017-08-14 15:04:56 -07001391 return null_handle_cmd(cmd);
Jens Axboef2298c02013-10-25 11:52:25 +01001392}
1393
Eric Biggersf363b082017-03-30 13:39:16 -07001394static const struct blk_mq_ops null_mq_ops = {
Jens Axboef2298c02013-10-25 11:52:25 +01001395 .queue_rq = null_queue_rq,
Christoph Hellwigce2c3502014-02-10 03:24:40 -08001396 .complete = null_softirq_done_fn,
Jens Axboef2298c02013-10-25 11:52:25 +01001397};
1398
Matias Bjørlingde65d2d2015-08-31 14:17:18 +02001399static void cleanup_queue(struct nullb_queue *nq)
1400{
1401 kfree(nq->tag_map);
1402 kfree(nq->cmds);
1403}
1404
1405static void cleanup_queues(struct nullb *nullb)
1406{
1407 int i;
1408
1409 for (i = 0; i < nullb->nr_queues; i++)
1410 cleanup_queue(&nullb->queues[i]);
1411
1412 kfree(nullb->queues);
1413}
1414
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001415static void null_del_dev(struct nullb *nullb)
1416{
Shaohua Li2984c862017-08-14 15:04:52 -07001417 struct nullb_device *dev = nullb->dev;
1418
Shaohua Li94bc02e2017-08-14 15:04:55 -07001419 ida_simple_remove(&nullb_indexes, nullb->index);
1420
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001421 list_del_init(&nullb->list);
1422
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001423 del_gendisk(nullb->disk);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001424
1425 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1426 hrtimer_cancel(&nullb->bw_timer);
1427 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1428 null_restart_queue_async(nullb);
1429 }
1430
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001431 blk_cleanup_queue(nullb->q);
Shaohua Li2984c862017-08-14 15:04:52 -07001432 if (dev->queue_mode == NULL_Q_MQ &&
1433 nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001434 blk_mq_free_tag_set(nullb->tag_set);
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001435 put_disk(nullb->disk);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001436 cleanup_queues(nullb);
Shaohua Lideb78b42017-08-14 15:04:59 -07001437 if (null_cache_active(nullb))
1438 null_free_device_storage(nullb->dev, true);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001439 kfree(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001440 dev->nullb = NULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001441}
1442
Shaohua Li306eb6b2017-08-14 15:04:57 -07001443static void null_config_discard(struct nullb *nullb)
1444{
1445 if (nullb->dev->discard == false)
1446 return;
1447 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1448 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1449 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
1450 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001451}
1452
Jens Axboef2298c02013-10-25 11:52:25 +01001453static int null_open(struct block_device *bdev, fmode_t mode)
1454{
1455 return 0;
1456}
1457
1458static void null_release(struct gendisk *disk, fmode_t mode)
1459{
1460}
1461
1462static const struct block_device_operations null_fops = {
1463 .owner = THIS_MODULE,
1464 .open = null_open,
1465 .release = null_release,
1466};
1467
Jens Axboe82f402f2017-06-20 14:22:01 -06001468static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1469{
1470 BUG_ON(!nullb);
1471 BUG_ON(!nq);
1472
1473 init_waitqueue_head(&nq->wait);
1474 nq->queue_depth = nullb->queue_depth;
Shaohua Li2984c862017-08-14 15:04:52 -07001475 nq->dev = nullb->dev;
Jens Axboe82f402f2017-06-20 14:22:01 -06001476}
1477
1478static void null_init_queues(struct nullb *nullb)
1479{
1480 struct request_queue *q = nullb->q;
1481 struct blk_mq_hw_ctx *hctx;
1482 struct nullb_queue *nq;
1483 int i;
1484
1485 queue_for_each_hw_ctx(q, hctx, i) {
1486 if (!hctx->nr_ctx || !hctx->tags)
1487 continue;
1488 nq = &nullb->queues[i];
1489 hctx->driver_data = nq;
1490 null_init_queue(nullb, nq);
1491 nullb->nr_queues++;
1492 }
1493}
1494
Jens Axboef2298c02013-10-25 11:52:25 +01001495static int setup_commands(struct nullb_queue *nq)
1496{
1497 struct nullb_cmd *cmd;
1498 int i, tag_size;
1499
1500 nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
1501 if (!nq->cmds)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001502 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001503
1504 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
1505 nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
1506 if (!nq->tag_map) {
1507 kfree(nq->cmds);
Matias Bjorling2d263a782013-12-18 13:41:43 +01001508 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001509 }
1510
1511 for (i = 0; i < nq->queue_depth; i++) {
1512 cmd = &nq->cmds[i];
1513 INIT_LIST_HEAD(&cmd->list);
1514 cmd->ll_list.next = NULL;
1515 cmd->tag = -1U;
1516 }
1517
1518 return 0;
1519}
1520
Jens Axboef2298c02013-10-25 11:52:25 +01001521static int setup_queues(struct nullb *nullb)
1522{
Shaohua Li2984c862017-08-14 15:04:52 -07001523 nullb->queues = kzalloc(nullb->dev->submit_queues *
1524 sizeof(struct nullb_queue), GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001525 if (!nullb->queues)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001526 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001527
1528 nullb->nr_queues = 0;
Shaohua Li2984c862017-08-14 15:04:52 -07001529 nullb->queue_depth = nullb->dev->hw_queue_depth;
Jens Axboef2298c02013-10-25 11:52:25 +01001530
Matias Bjorling2d263a782013-12-18 13:41:43 +01001531 return 0;
1532}
1533
1534static int init_driver_queues(struct nullb *nullb)
1535{
1536 struct nullb_queue *nq;
1537 int i, ret = 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001538
Shaohua Li2984c862017-08-14 15:04:52 -07001539 for (i = 0; i < nullb->dev->submit_queues; i++) {
Jens Axboef2298c02013-10-25 11:52:25 +01001540 nq = &nullb->queues[i];
Matias Bjorling2d263a782013-12-18 13:41:43 +01001541
1542 null_init_queue(nullb, nq);
1543
1544 ret = setup_commands(nq);
1545 if (ret)
Jan Kara31f96902014-10-22 15:34:21 +02001546 return ret;
Jens Axboef2298c02013-10-25 11:52:25 +01001547 nullb->nr_queues++;
1548 }
Matias Bjorling2d263a782013-12-18 13:41:43 +01001549 return 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001550}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}
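
/*
 * Capacity arithmetic above, assuming dev->size is kept in megabytes
 * (as the rest of this driver appears to do): dev->size == 1024 gives
 * 1024 * 1024 * 1024 = 1073741824 bytes, and the >> 9 converts that to
 * 2097152 512-byte sectors for set_capacity().
 */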

static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}
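
/*
 * null_init_tag_set() serves double duty: given a nullb it sizes a
 * per-device tag set from that device's configuration, and given NULL
 * (see null_init() below) it sizes the module-wide set shared by all
 * devices when shared_tags is enabled, falling back to the g_* module
 * parameters.
 */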

static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Memory backing allocates pages in the I/O path, so it must block */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* a write-back cache is meaningless without memory backing */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
			dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* bandwidth throttling cannot stop a bio-based queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}
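
/*
 * Worked example for the blocksize clamp above: a configured value of
 * 3000 rounds down to 2560 (a multiple of 512 inside [512, 4096]) and
 * stays there; anything below 512 rounds down to 0 and is then clamped
 * up to 512. The cache_size cap presumably keeps a later MB-to-bytes
 * conversion (* 1024 * 1024, done elsewhere in the driver) from
 * overflowing an unsigned long.
 */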

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

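	/*
	 * Three queue modes: blk-mq (NULL_Q_MQ) builds the queue from a
	 * tag set (shared or per-device), NULL_Q_BIO takes bios directly
	 * via a make_request function, and the remaining branch is the
	 * legacy request-fn path.
	 */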
	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
						dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

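	/*
	 * A nonzero mbps cap (validated above to exclude bio mode, which
	 * cannot stop its queue) arms the bandwidth timer; presumably
	 * nullb_setup_bwtimer(), not shown in this excerpt, restarts
	 * throttled queues on each tick.
	 */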
	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

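	/*
	 * With a cache configured, advertise a volatile write-back cache
	 * plus FUA support so the block layer issues flushes, and allow
	 * flush requests to be queued rather than serialized.
	 */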
	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
		blk_queue_flush_queueable(nullb->q, true);
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
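
/*
 * Runtime usage sketch (hedged): besides the devices created at module
 * load, null_add_dev() is reached through the configfs interface
 * registered in null_init() below. The attribute names here are assumed
 * from this driver's configfs code, which is not part of this excerpt:
 *
 *   mkdir /sys/kernel/config/nullb/mydisk
 *   echo 4096 > /sys/kernel/config/nullb/mydisk/blocksize
 *   echo 1 > /sys/kernel/config/nullb/mydisk/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/mydisk/power	# instantiate device
 */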

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	/* nullb_page.bitmap must fit one bit per sector plus two flag bits */
	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
		return -EINVAL;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}
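
/*
 * Load-time usage sketch (hedged): the parameter names below are the
 * g_* module parameters assumed to be declared earlier in this file,
 * outside this excerpt:
 *
 *   modprobe null_blk nr_devices=2 queue_mode=2 bs=4096 gb=4
 *
 * would create /dev/nullb0 and /dev/nullb1 as 4 GB blk-mq devices with
 * a 4096-byte block size; rmmod null_blk tears them down via
 * null_exit().
 */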

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");