/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
#include <linux/fault-inject.h>

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

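/*
 * Bytes of bandwidth budget made available per throttle tick. Worked
 * example (illustrative, not part of the original source): with mbps = 100
 * and TICKS_PER_SEC = 50, each 20 ms tick grants
 * (1 << 20) / 50 * 100 = 2,097,152 bytes, i.e. 100 MB/s overall.
 */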
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct __call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	blk_status_t error;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;
	unsigned int requeue_selection;

	struct nullb_cmd *cmds;
};

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page has been
 * freed and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

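/*
 * Layout example (illustrative, not part of the original source): with a
 * 4 KB PAGE_SIZE, MAP_SZ is (4096 >> 9) + 2 = 10, so bits 0-7 track the
 * eight 512-byte sectors in the page, bit 8 is NULLB_PAGE_FREE and bit 9
 * is NULLB_PAGE_LOCK.
 */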
struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;
	struct badblocks badblocks;

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if discard is supported */
};

struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	unsigned int queue_depth;
	atomic_long_t cur_bytes;
	struct hrtimer bw_timer;
	unsigned long cache_flush_pos;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

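/*
 * Usage sketch (illustrative, not part of the original source): the
 * parameters above apply to the devices created at module load time, e.g.
 *
 *	modprobe null_blk nr_devices=2 queue_mode=2 gb=8 bs=4096 irqmode=1
 *
 * creates /dev/nullb0 and /dev/nullb1 as 8 GB multiqueue devices with a
 * 4096-byte block size and softirq completions.
 */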
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE) \
static ssize_t \
nullb_device_##NAME##_show(struct config_item *item, char *page) \
{ \
	return nullb_device_##TYPE##_attr_show( \
				to_nullb_device(item)->NAME, page); \
} \
static ssize_t \
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count) \
{ \
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
		return -EBUSY; \
	return nullb_device_##TYPE##_attr_store( \
			&to_nullb_device(item)->NAME, page, count); \
} \
CONFIGFS_ATTR(nullb_device_, NAME);

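/*
 * For instance, NULLB_DEVICE_ATTR(size, ulong) below expands to
 * nullb_device_size_show()/nullb_device_size_store() wrapping the ulong
 * helpers above; stores are refused with -EBUSY once the device has been
 * configured (NULLB_DEV_FL_CONFIGURED).
 */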
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

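/*
 * Usage sketch (illustrative, not part of the original source): with
 * configfs mounted at /sys/kernel/config, a memory-backed device is
 * created and powered on from userspace like so:
 *
 *	mkdir /sys/kernel/config/nullb/nullb1
 *	echo 1 > /sys/kernel/config/nullb/nullb1/memory_backed
 *	echo 4096 > /sys/kernel/config/nullb/nullb1/blocksize
 *	echo 1 > /sys/kernel/config/nullb/nullb1/power
 *
 * Writing 0 to "power" tears the device down again via null_del_dev().
 */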
static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

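/*
 * Usage sketch (illustrative): the store above accepts "+start-end" to
 * mark an inclusive sector range bad and "-start-end" to clear it, e.g.
 *
 *	echo "+0-1023" > /sys/kernel/config/nullb/nullb1/badblocks
 *
 * makes I/O touching sectors 0-1023 fail with BLK_STS_IOERR.
 */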
static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

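/*
 * Scan the per-queue tag bitmap for a free slot and claim it atomically;
 * if another CPU claims the same bit first, test_and_set_bit_lock()
 * fails and we rescan.
 */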
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, cmd->error);
		break;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
	sector_t sector, bool ignore_cache)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

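	/*
	 * Drop the lock for the allocations below: null_alloc_page() and
	 * radix_tree_preload() may sleep with GFP_NOIO, which is not
	 * allowed under a spinlock with interrupts disabled.
	 */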
	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could drop the lock before using the
	 * c_pages. To avoid a race, mark the pages locked so they cannot
	 * be freed underneath us.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * Skip any page that another thread is already flushing
		 * to disk.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

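		/*
		 * A FUA write went straight to the backing store above
		 * (ignore_cache), so drop any stale copy of this sector
		 * that may still sit in the cache tree.
		 */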
		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
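			/* sectors that were never written read back as zeroes */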
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio_op(bio) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
	else {
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	unsigned long flags;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
	else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

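	/*
	 * Bandwidth throttling: each request debits its byte count from an
	 * atomic budget that nullb_bwtimer_fn() refills once per tick. When
	 * the budget goes negative, stop the queue and requeue the request
	 * until the timer tops the budget up again.
	 */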
	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			if (dev->queue_mode == NULL_Q_RQ) {
				struct request_queue *q = nullb->q;

				spin_lock_irq(q->queue_lock);
				rq->rq_flags |= RQF_DONTPREP;
				blk_requeue_request(q, rq);
				spin_unlock_irq(q->queue_lock);
				return BLK_STS_OK;
			} else
				/* requeue request */
				return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

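/*
 * nullb_to_queue() above spreads CPUs evenly across the queues. Worked
 * example (illustrative, not part of the original source): with
 * nr_cpu_ids = 8 and nr_queues = 2, the divisor is (8 + 2 - 1) / 2 = 4,
 * so CPUs 0-3 map to queue 0 and CPUs 4-7 map to queue 1.
 */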
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

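/*
 * Note (illustrative, not from the original source): null_timeout_attr and
 * null_requeue_attr are set up from the "timeout" and "requeue" module
 * parameters elsewhere in the driver, which follow the standard
 * fault-injection string format "<interval>,<probability>,<space>,<times>";
 * e.g. timeout="1,100,0,-1" should make every request time out.
 */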
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		/* just ignore the request */
		if (should_timeout_request(rq))
			continue;
		if (should_requeue_request(rq)) {
			blk_requeue_request(q, rq);
			continue;
		}

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_softirq_done_fn,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001493static void null_del_dev(struct nullb *nullb)
1494{
Shaohua Li2984c862017-08-14 15:04:52 -07001495 struct nullb_device *dev = nullb->dev;
1496
Shaohua Li94bc02e2017-08-14 15:04:55 -07001497 ida_simple_remove(&nullb_indexes, nullb->index);
1498
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001499 list_del_init(&nullb->list);
1500
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001501 del_gendisk(nullb->disk);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001502
1503 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1504 hrtimer_cancel(&nullb->bw_timer);
1505 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1506 null_restart_queue_async(nullb);
1507 }
1508
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001509 blk_cleanup_queue(nullb->q);
Shaohua Li2984c862017-08-14 15:04:52 -07001510 if (dev->queue_mode == NULL_Q_MQ &&
1511 nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001512 blk_mq_free_tag_set(nullb->tag_set);
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001513 put_disk(nullb->disk);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001514 cleanup_queues(nullb);
Shaohua Lideb78b42017-08-14 15:04:59 -07001515 if (null_cache_active(nullb))
1516 null_free_device_storage(nullb->dev, true);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001517 kfree(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001518 dev->nullb = NULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001519}
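
/*
 * Teardown order above matters: the gendisk is deleted first so no new
 * I/O can arrive, the bandwidth timer is cancelled and throttled waiters
 * released before the queue is destroyed, and the device's backing pages
 * are freed only once nothing can dereference them any more.
 */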
1520
Shaohua Li306eb6b2017-08-14 15:04:57 -07001521static void null_config_discard(struct nullb *nullb)
1522{
	if (!nullb->dev->discard)
1524 return;
1525 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1526 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1527 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
Bart Van Assche8b904b52018-03-07 17:10:10 -08001528 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001529}
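
/*
 * Worked example for the discard limits above: with the default 512-byte
 * blocksize, discard granularity and alignment are both 512 bytes, and a
 * single discard request may cover UINT_MAX >> 9 sectors, i.e. just
 * under 4 GiB.
 */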
1530
Jens Axboef2298c02013-10-25 11:52:25 +01001531static int null_open(struct block_device *bdev, fmode_t mode)
1532{
1533 return 0;
1534}
1535
1536static void null_release(struct gendisk *disk, fmode_t mode)
1537{
1538}
1539
1540static const struct block_device_operations null_fops = {
1541 .owner = THIS_MODULE,
1542 .open = null_open,
1543 .release = null_release,
1544};
1545
Jens Axboe82f402f2017-06-20 14:22:01 -06001546static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1547{
1548 BUG_ON(!nullb);
1549 BUG_ON(!nq);
1550
1551 init_waitqueue_head(&nq->wait);
1552 nq->queue_depth = nullb->queue_depth;
Shaohua Li2984c862017-08-14 15:04:52 -07001553 nq->dev = nullb->dev;
Jens Axboe82f402f2017-06-20 14:22:01 -06001554}
1555
1556static void null_init_queues(struct nullb *nullb)
1557{
1558 struct request_queue *q = nullb->q;
1559 struct blk_mq_hw_ctx *hctx;
1560 struct nullb_queue *nq;
1561 int i;
1562
1563 queue_for_each_hw_ctx(q, hctx, i) {
1564 if (!hctx->nr_ctx || !hctx->tags)
1565 continue;
1566 nq = &nullb->queues[i];
1567 hctx->driver_data = nq;
1568 null_init_queue(nullb, nq);
1569 nullb->nr_queues++;
1570 }
1571}
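
/*
 * Each active hardware context is paired with the nullb_queue of the same
 * index via hctx->driver_data; contexts without mapped software queues or
 * tags are skipped, so nr_queues ends up counting only the queues that
 * can actually receive I/O.
 */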
1572
Jens Axboef2298c02013-10-25 11:52:25 +01001573static int setup_commands(struct nullb_queue *nq)
1574{
1575 struct nullb_cmd *cmd;
1576 int i, tag_size;
1577
	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
1579 if (!nq->cmds)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001580 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001581
1582 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
1584 if (!nq->tag_map) {
1585 kfree(nq->cmds);
Matias Bjorling2d263a782013-12-18 13:41:43 +01001586 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001587 }
1588
1589 for (i = 0; i < nq->queue_depth; i++) {
1590 cmd = &nq->cmds[i];
1591 INIT_LIST_HEAD(&cmd->list);
1592 cmd->ll_list.next = NULL;
1593 cmd->tag = -1U;
1594 }
1595
1596 return 0;
1597}
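
/*
 * tag_map sizing, worked through: one bit per command, rounded up to
 * whole longs. With the default queue depth of 64 on a 64-bit machine,
 * ALIGN(64, 64) / 64 == 1, so a single unsigned long covers every tag;
 * a depth of 65 would take two.
 */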
1598
Jens Axboef2298c02013-10-25 11:52:25 +01001599static int setup_queues(struct nullb *nullb)
1600{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue), GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001603 if (!nullb->queues)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001604 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001605
1606 nullb->nr_queues = 0;
Shaohua Li2984c862017-08-14 15:04:52 -07001607 nullb->queue_depth = nullb->dev->hw_queue_depth;
Jens Axboef2298c02013-10-25 11:52:25 +01001608
Matias Bjorling2d263a782013-12-18 13:41:43 +01001609 return 0;
1610}
1611
1612static int init_driver_queues(struct nullb *nullb)
1613{
1614 struct nullb_queue *nq;
1615 int i, ret = 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001616
Shaohua Li2984c862017-08-14 15:04:52 -07001617 for (i = 0; i < nullb->dev->submit_queues; i++) {
Jens Axboef2298c02013-10-25 11:52:25 +01001618 nq = &nullb->queues[i];
Matias Bjorling2d263a782013-12-18 13:41:43 +01001619
1620 null_init_queue(nullb, nq);
1621
1622 ret = setup_commands(nq);
1623 if (ret)
Jan Kara31f96902014-10-22 15:34:21 +02001624 return ret;
Jens Axboef2298c02013-10-25 11:52:25 +01001625 nullb->nr_queues++;
1626 }
Matias Bjorling2d263a782013-12-18 13:41:43 +01001627 return 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001628}
1629
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001630static int null_gendisk_register(struct nullb *nullb)
Jens Axboef2298c02013-10-25 11:52:25 +01001631{
1632 struct gendisk *disk;
Jens Axboef2298c02013-10-25 11:52:25 +01001633 sector_t size;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001634
Shaohua Li2984c862017-08-14 15:04:52 -07001635 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001636 if (!disk)
1637 return -ENOMEM;
Shaohua Li2984c862017-08-14 15:04:52 -07001638 size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001639 set_capacity(disk, size >> 9);
1640
1641 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
1642 disk->major = null_major;
1643 disk->first_minor = nullb->index;
1644 disk->fops = &null_fops;
1645 disk->private_data = nullb;
1646 disk->queue = nullb->q;
1647 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1648
1649 add_disk(disk);
1650 return 0;
1651}
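
/*
 * Capacity math, worked through: dev->size is in megabytes, so e.g. a
 * 250 MB device is 250 * 1024 * 1024 = 262144000 bytes, and since
 * set_capacity() takes 512-byte sectors the shift by 9 yields 512000
 * sectors.
 */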
1652
Shaohua Li2984c862017-08-14 15:04:52 -07001653static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001654{
1655 set->ops = &null_mq_ops;
Shaohua Li2984c862017-08-14 15:04:52 -07001656 set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
1657 g_submit_queues;
1658 set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
1659 g_hw_queue_depth;
1660 set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
Jens Axboe82f402f2017-06-20 14:22:01 -06001661 set->cmd_size = sizeof(struct nullb_cmd);
1662 set->flags = BLK_MQ_F_SHOULD_MERGE;
weiping zhangb3cffc32017-09-30 09:49:21 +08001663 if (g_no_sched)
1664 set->flags |= BLK_MQ_F_NO_SCHED;
Jens Axboe82f402f2017-06-20 14:22:01 -06001665 set->driver_data = NULL;
1666
Shaohua Li0d06a422017-08-25 13:46:25 -07001667 if ((nullb && nullb->dev->blocking) || g_blocking)
Jens Axboe82f402f2017-06-20 14:22:01 -06001668 set->flags |= BLK_MQ_F_BLOCKING;
1669
1670 return blk_mq_alloc_tag_set(set);
1671}
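
/*
 * A tag set describes the hardware-queue geometry (queue count, depth,
 * NUMA node) for blk-mq. Each device normally embeds its own set in
 * nullb->__tag_set, but with the shared_tags module parameter every
 * device points at the global tag_set instead, making them contend for
 * one tag space - handy for exercising the block layer's shared-tag
 * paths.
 */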
1672
Shaohua Licedcafa2017-08-14 15:04:54 -07001673static void null_validate_conf(struct nullb_device *dev)
1674{
1675 dev->blocksize = round_down(dev->blocksize, 512);
1676 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
Shaohua Licedcafa2017-08-14 15:04:54 -07001677
1678 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
1679 if (dev->submit_queues != nr_online_nodes)
1680 dev->submit_queues = nr_online_nodes;
1681 } else if (dev->submit_queues > nr_cpu_ids)
1682 dev->submit_queues = nr_cpu_ids;
1683 else if (dev->submit_queues == 0)
1684 dev->submit_queues = 1;
1685
1686 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
1687 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
Shaohua Li5bcd0e02017-08-14 15:04:56 -07001688
	/* Memory-backed devices allocate pages in the I/O path, so block */
1690 if (dev->memory_backed)
1691 dev->blocking = true;
	else /* a cache is meaningless without memory backing */
1693 dev->cache_size = 0;
1694 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
1695 dev->cache_size);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001696 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* a bio-based queue cannot be stopped, so throttling is unsupported */
1698 if (dev->queue_mode == NULL_Q_BIO)
1699 dev->mbps = 0;
Shaohua Licedcafa2017-08-14 15:04:54 -07001700}
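
/*
 * Example of the validation above, assuming a user configures
 * blocksize=520 and mbps=100000: the block size is rounded down to 512
 * and clamped to [512, 4096], mbps is capped at 40960 (40 GB/s), and a
 * memory-backed device is forced to blocking mode since it allocates
 * pages while servicing I/O.
 */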
1701
Jens Axboe24941b92018-02-28 09:18:57 -07001702#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1703static bool __null_setup_fault(struct fault_attr *attr, char *str)
1704{
1705 if (!str[0])
1706 return true;
1707
1708 if (!setup_fault_attr(attr, str))
1709 return false;
1710
1711 attr->verbose = 0;
1712 return true;
1713}
1714#endif
1715
Jens Axboe93b57042018-01-10 09:06:23 -07001716static bool null_setup_fault(void)
1717{
Arnd Bergmann33f782c2018-01-11 11:31:25 +01001718#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
Jens Axboe24941b92018-02-28 09:18:57 -07001719 if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
Jens Axboe93b57042018-01-10 09:06:23 -07001720 return false;
Jens Axboe24941b92018-02-28 09:18:57 -07001721 if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
1722 return false;
Arnd Bergmann33f782c2018-01-11 11:31:25 +01001723#endif
Jens Axboe93b57042018-01-10 09:06:23 -07001724 return true;
1725}
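
/*
 * Usage sketch, assuming the standard fault-attr string format from
 * lib/fault-inject.c ("<interval>,<probability>,<space>,<times>"):
 * loading the module with
 *
 *   modprobe null_blk timeout="1,100,0,-1"
 *
 * would make should_timeout_request() trigger for every request
 * indefinitely (interval 1, probability 100%, no limit on times); the
 * "requeue" parameter takes the same format.
 */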
1726
Shaohua Li2984c862017-08-14 15:04:52 -07001727static int null_add_dev(struct nullb_device *dev)
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001728{
1729 struct nullb *nullb;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001730 int rv;
Jens Axboef2298c02013-10-25 11:52:25 +01001731
Shaohua Licedcafa2017-08-14 15:04:54 -07001732 null_validate_conf(dev);
1733
Shaohua Li2984c862017-08-14 15:04:52 -07001734 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001735 if (!nullb) {
1736 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001737 goto out;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001738 }
Shaohua Li2984c862017-08-14 15:04:52 -07001739 nullb->dev = dev;
1740 dev->nullb = nullb;
Jens Axboef2298c02013-10-25 11:52:25 +01001741
1742 spin_lock_init(&nullb->lock);
1743
Robert Elliottdc501dc2014-09-02 11:38:49 -05001744 rv = setup_queues(nullb);
1745 if (rv)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001746 goto out_free_nullb;
Jens Axboef2298c02013-10-25 11:52:25 +01001747
Shaohua Li2984c862017-08-14 15:04:52 -07001748 if (dev->queue_mode == NULL_Q_MQ) {
Jens Axboe82f402f2017-06-20 14:22:01 -06001749 if (shared_tags) {
1750 nullb->tag_set = &tag_set;
1751 rv = 0;
1752 } else {
1753 nullb->tag_set = &nullb->__tag_set;
Shaohua Li2984c862017-08-14 15:04:52 -07001754 rv = null_init_tag_set(nullb, nullb->tag_set);
Jens Axboe82f402f2017-06-20 14:22:01 -06001755 }
Jens Axboef2298c02013-10-25 11:52:25 +01001756
Robert Elliottdc501dc2014-09-02 11:38:49 -05001757 if (rv)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001758 goto out_cleanup_queues;
Jens Axboef2298c02013-10-25 11:52:25 +01001759
		if (!null_setup_fault()) {
			rv = -EINVAL;
			goto out_cleanup_tags;
		}
1762
Jens Axboe5448aca2018-01-09 12:47:24 -07001763 nullb->tag_set->timeout = 5 * HZ;
Jens Axboe82f402f2017-06-20 14:22:01 -06001764 nullb->q = blk_mq_init_queue(nullb->tag_set);
Ming Lei35b489d2015-01-02 14:25:27 +00001765 if (IS_ERR(nullb->q)) {
Robert Elliottdc501dc2014-09-02 11:38:49 -05001766 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001767 goto out_cleanup_tags;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001768 }
Jens Axboe82f402f2017-06-20 14:22:01 -06001769 null_init_queues(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001770 } else if (dev->queue_mode == NULL_Q_BIO) {
Bart Van Assche5ee05242018-02-28 10:15:31 -08001771 nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
1772 NULL);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001773 if (!nullb->q) {
1774 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001775 goto out_cleanup_queues;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001776 }
Jens Axboef2298c02013-10-25 11:52:25 +01001777 blk_queue_make_request(nullb->q, null_queue_bio);
Jan Kara31f96902014-10-22 15:34:21 +02001778 rv = init_driver_queues(nullb);
1779 if (rv)
1780 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001781 } else {
Shaohua Li2984c862017-08-14 15:04:52 -07001782 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
1783 dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001784 if (!nullb->q) {
1785 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001786 goto out_cleanup_queues;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001787 }
Jens Axboe93b57042018-01-10 09:06:23 -07001788
		if (!null_setup_fault()) {
			rv = -EINVAL;
			goto out_cleanup_blk_queue;
		}
1791
Jens Axboef2298c02013-10-25 11:52:25 +01001792 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001793 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
Jens Axboe5448aca2018-01-09 12:47:24 -07001794 blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
1795 nullb->q->rq_timeout = 5 * HZ;
Jan Kara31f96902014-10-22 15:34:21 +02001796 rv = init_driver_queues(nullb);
1797 if (rv)
1798 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001799 }
1800
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001801 if (dev->mbps) {
1802 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
1803 nullb_setup_bwtimer(nullb);
1804 }
1805
Shaohua Lideb78b42017-08-14 15:04:59 -07001806 if (dev->cache_size > 0) {
1807 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
1808 blk_queue_write_cache(nullb->q, true, true);
1809 blk_queue_flush_queueable(nullb->q, true);
1810 }
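	/*
	 * Advertising a write-back cache with FLUSH and FUA support means
	 * filesystems will issue flush requests, which null_blk services
	 * by writing cached pages back to the device's backing store.
	 */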
1811
Jens Axboef2298c02013-10-25 11:52:25 +01001812 nullb->q->queuedata = nullb;
Bart Van Assche8b904b52018-03-07 17:10:10 -08001813 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
1814 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
Jens Axboef2298c02013-10-25 11:52:25 +01001815
	mutex_lock(&lock);
	rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	if (rv < 0) {
		mutex_unlock(&lock);
		goto out_cleanup_blk_queue;
	}
	nullb->index = rv;
	dev->index = nullb->index;
	mutex_unlock(&lock);
1820
Shaohua Li2984c862017-08-14 15:04:52 -07001821 blk_queue_logical_block_size(nullb->q, dev->blocksize);
1822 blk_queue_physical_block_size(nullb->q, dev->blocksize);
Jens Axboef2298c02013-10-25 11:52:25 +01001823
Shaohua Li306eb6b2017-08-14 15:04:57 -07001824 null_config_discard(nullb);
Jens Axboef2298c02013-10-25 11:52:25 +01001825
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001826 sprintf(nullb->disk_name, "nullb%d", nullb->index);
1827
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001828 rv = null_gendisk_register(nullb);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001829 if (rv)
1830 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001831
Matias Bjørlinga5143792016-02-11 14:49:13 +01001832 mutex_lock(&lock);
1833 list_add_tail(&nullb->list, &nullb_list);
1834 mutex_unlock(&lock);
Wenwei Tao3681c852016-03-05 00:27:04 +08001835
Jens Axboef2298c02013-10-25 11:52:25 +01001836 return 0;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001837out_cleanup_blk_queue:
1838 blk_cleanup_queue(nullb->q);
1839out_cleanup_tags:
Shaohua Li2984c862017-08-14 15:04:52 -07001840 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001841 blk_mq_free_tag_set(nullb->tag_set);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001842out_cleanup_queues:
1843 cleanup_queues(nullb);
1844out_free_nullb:
1845 kfree(nullb);
1846out:
Robert Elliottdc501dc2014-09-02 11:38:49 -05001847 return rv;
Jens Axboef2298c02013-10-25 11:52:25 +01001848}
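
/*
 * Runtime usage sketch (the configfs mount point is an assumption; the
 * nullb subsystem is registered in null_init() below):
 *
 *   mkdir /sys/kernel/config/nullb/nullb1
 *   echo 1 > /sys/kernel/config/nullb/nullb1/power
 *
 * Writing 1 to "power" is what ultimately invokes null_add_dev() for the
 * newly configured device.
 */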
1849
1850static int __init null_init(void)
1851{
Minfei Huangaf096e22015-12-08 13:47:34 -07001852 int ret = 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001853 unsigned int i;
Minfei Huangaf096e22015-12-08 13:47:34 -07001854 struct nullb *nullb;
Shaohua Li2984c862017-08-14 15:04:52 -07001855 struct nullb_device *dev;
Jens Axboef2298c02013-10-25 11:52:25 +01001856
Shaohua Li2984c862017-08-14 15:04:52 -07001857 if (g_bs > PAGE_SIZE) {
Raghavendra K T9967d8a2014-01-21 16:59:59 +05301858 pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
Shaohua Li2984c862017-08-14 15:04:52 -07001860 g_bs = PAGE_SIZE;
Raghavendra K T9967d8a2014-01-21 16:59:59 +05301861 }
Jens Axboef2298c02013-10-25 11:52:25 +01001862
Shaohua Li2984c862017-08-14 15:04:52 -07001863 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
1864 if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: forcing submit_queues to %u (one per online node)\n",
				nr_online_nodes);
Shaohua Li2984c862017-08-14 15:04:52 -07001867 g_submit_queues = nr_online_nodes;
Matias Bjørlingfc1bc352013-12-21 00:11:01 +01001868 }
Shaohua Li2984c862017-08-14 15:04:52 -07001869 } else if (g_submit_queues > nr_cpu_ids)
1870 g_submit_queues = nr_cpu_ids;
1871 else if (g_submit_queues <= 0)
1872 g_submit_queues = 1;
Jens Axboef2298c02013-10-25 11:52:25 +01001873
Shaohua Li2984c862017-08-14 15:04:52 -07001874 if (g_queue_mode == NULL_Q_MQ && shared_tags) {
1875 ret = null_init_tag_set(NULL, &tag_set);
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001876 if (ret)
1877 return ret;
1878 }
1879
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001880 config_group_init(&nullb_subsys.su_group);
1881 mutex_init(&nullb_subsys.su_mutex);
1882
1883 ret = configfs_register_subsystem(&nullb_subsys);
1884 if (ret)
1885 goto err_tagset;
1886
Jens Axboef2298c02013-10-25 11:52:25 +01001887 mutex_init(&lock);
1888
Jens Axboef2298c02013-10-25 11:52:25 +01001889 null_major = register_blkdev(0, "nullb");
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001890 if (null_major < 0) {
1891 ret = null_major;
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001892 goto err_conf;
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001893 }
Jens Axboef2298c02013-10-25 11:52:25 +01001894
Minfei Huangaf096e22015-12-08 13:47:34 -07001895 for (i = 0; i < nr_devices; i++) {
Shaohua Li2984c862017-08-14 15:04:52 -07001896 dev = null_alloc_dev();
Wei Yongjun30c516d2017-10-17 12:11:46 +00001897 if (!dev) {
1898 ret = -ENOMEM;
Minfei Huangaf096e22015-12-08 13:47:34 -07001899 goto err_dev;
Wei Yongjun30c516d2017-10-17 12:11:46 +00001900 }
Shaohua Li2984c862017-08-14 15:04:52 -07001901 ret = null_add_dev(dev);
1902 if (ret) {
1903 null_free_dev(dev);
1904 goto err_dev;
1905 }
Minfei Huangaf096e22015-12-08 13:47:34 -07001906 }
1907
Jens Axboef2298c02013-10-25 11:52:25 +01001908 pr_info("null: module loaded\n");
1909 return 0;
Minfei Huangaf096e22015-12-08 13:47:34 -07001910
1911err_dev:
1912 while (!list_empty(&nullb_list)) {
1913 nullb = list_entry(nullb_list.next, struct nullb, list);
Shaohua Li2984c862017-08-14 15:04:52 -07001914 dev = nullb->dev;
Minfei Huangaf096e22015-12-08 13:47:34 -07001915 null_del_dev(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001916 null_free_dev(dev);
Minfei Huangaf096e22015-12-08 13:47:34 -07001917 }
Minfei Huangaf096e22015-12-08 13:47:34 -07001918 unregister_blkdev(null_major, "nullb");
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001919err_conf:
1920 configfs_unregister_subsystem(&nullb_subsys);
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001921err_tagset:
Shaohua Li2984c862017-08-14 15:04:52 -07001922 if (g_queue_mode == NULL_Q_MQ && shared_tags)
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001923 blk_mq_free_tag_set(&tag_set);
Minfei Huangaf096e22015-12-08 13:47:34 -07001924 return ret;
Jens Axboef2298c02013-10-25 11:52:25 +01001925}
1926
1927static void __exit null_exit(void)
1928{
1929 struct nullb *nullb;
1930
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001931 configfs_unregister_subsystem(&nullb_subsys);
1932
Jens Axboef2298c02013-10-25 11:52:25 +01001933 unregister_blkdev(null_major, "nullb");
1934
1935 mutex_lock(&lock);
1936 while (!list_empty(&nullb_list)) {
Shaohua Li2984c862017-08-14 15:04:52 -07001937 struct nullb_device *dev;
1938
Jens Axboef2298c02013-10-25 11:52:25 +01001939 nullb = list_entry(nullb_list.next, struct nullb, list);
Shaohua Li2984c862017-08-14 15:04:52 -07001940 dev = nullb->dev;
Jens Axboef2298c02013-10-25 11:52:25 +01001941 null_del_dev(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001942 null_free_dev(dev);
Jens Axboef2298c02013-10-25 11:52:25 +01001943 }
1944 mutex_unlock(&lock);
Matias Bjørling6bb95352015-11-19 12:50:08 +01001945
Shaohua Li2984c862017-08-14 15:04:52 -07001946 if (g_queue_mode == NULL_Q_MQ && shared_tags)
Jens Axboe82f402f2017-06-20 14:22:01 -06001947 blk_mq_free_tag_set(&tag_set);
Jens Axboef2298c02013-10-25 11:52:25 +01001948}
1949
1950module_init(null_init);
1951module_exit(null_exit);
1952
Jens Axboe231b3db2017-08-25 12:53:15 -06001953MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
Jens Axboef2298c02013-10-25 11:52:25 +01001954MODULE_LICENSE("GPL");