// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

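/*
 * Bytes a device throttled to @mbps MB/s may transfer during one
 * TIMER_INTERVAL tick: 1 MiB / TICKS_PER_SEC per configured megabyte.
 */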
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap recording which sectors in the page hold data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap serve special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)				\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page,\
			    size_t count)				\
{									\
	int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
	struct nullb_device *dev = to_nullb_device(item);		\
	TYPE new_value;							\
	int ret;							\
									\
	ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
	if (ret < 0)							\
		return ret;						\
	if (apply_fn)							\
		ret = apply_fn(dev, new_value);				\
	else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags))	\
		ret = -EBUSY;						\
	if (ret < 0)							\
		return ret;						\
	dev->NAME = new_value;						\
	return count;							\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);

static int nullb_apply_submit_queues(struct nullb_device *dev,
				     unsigned int submit_queues)
{
	struct nullb *nullb = dev->nullb;
	struct blk_mq_tag_set *set;

	if (!nullb)
		return 0;

	set = nullb->tag_set;
	blk_mq_update_nr_hw_queues(set, submit_queues);
	return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
}

NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

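/*
 * Accepts ranges of the form "+<start>-<end>" to mark sectors bad and
 * "-<start>-<end>" to clear them again, e.g. "+0-1023". The first write
 * also flips badblocks.shift from -1 to 0, which enables the badblocks
 * check in the I/O path.
 */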
static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

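/*
 * Tag handling for bio-based queues: tags are simply bits in nq->tag_map,
 * claimed and released with atomic bitops, with waiters parked on nq->wait.
 * blk-mq queues use the block layer's own tag allocator instead.
 */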
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

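/*
 * Write one cache page back into the data radix tree. The caller
 * (null_make_cache_space()) has already set NULLB_PAGE_LOCK on the page;
 * if the page was freed in the meantime (NULLB_PAGE_FREE), the copy is
 * skipped and the page is released instead.
 */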
static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

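/*
 * Evict cache pages until at least @n more bytes fit under the configured
 * cache_size. Pages already locked by another flusher are skipped; when a
 * pass makes no progress, the lock is briefly dropped to let the competing
 * threads finish before retrying.
 */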
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() can drop the lock before it is done with
	 * the c_pages. To avoid a race, don't allow the pages to be freed.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * Skip a page that another thread is already flushing
		 * to disk.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

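/*
 * Copy @n bytes from @source into the backing store one block at a time.
 * With the write-back cache enabled the data lands in the cache tree
 * unless the request is FUA, in which case it goes straight to the data
 * tree and any stale cache copy of the sector is dropped.
 */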
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
			       unsigned int len, unsigned int off)
{
	void *dst;

	dst = kmap_atomic(page);
	memset(dst + off, 0xFF, len);
	kunmap_atomic(dst);
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	struct nullb_device *dev = nullb->dev;
	unsigned int valid_len = len;
	int err = 0;

	if (!is_write) {
		if (dev->zoned)
			valid_len = null_zone_valid_read_len(nullb,
				sector, len);

		if (valid_len) {
			err = copy_from_nullb(nullb, page, off,
				sector, valid_len);
			off += valid_len;
			len -= valid_len;
		}

		if (len)
			nullb_fill_pattern(nullb, page, len, off);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     rq->cmd_flags & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}

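/*
 * Charge the request against the per-tick byte budget. Overdrawing the
 * budget stops the hardware queues until nullb_bwtimer_fn() refills
 * cur_bytes; the request itself is requeued via BLK_STS_DEV_RESOURCE.
 */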
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts = BLK_STS_OK;
	struct request *rq = cmd->rq;

	if (!hrtimer_active(&nullb->bw_timer))
		hrtimer_restart(&nullb->bw_timer);

	if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
		null_stop_queue(nullb);
		/* race with timer */
		if (atomic_long_read(&nullb->cur_bytes) > 0)
			null_restart_queue_async(nullb);
		/* requeue request */
		sts = BLK_STS_DEV_RESOURCE;
	}
	return sts;
}

static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
						 sector_t sector,
						 sector_t nr_sectors)
{
	struct badblocks *bb = &cmd->nq->dev->badblocks;
	sector_t first_bad;
	int bad_sectors;

	if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
						     enum req_opf op)
{
	struct nullb_device *dev = cmd->nq->dev;
	int err;

	if (dev->queue_mode == NULL_Q_BIO)
		err = null_handle_bio(cmd);
	else
		err = null_handle_rq(cmd);

	return errno_to_blk_status(err);
}

static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (cmd->nq->dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

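/*
 * Central dispatch for both bio and request modes: applies throttling,
 * flush, badblocks, memory backing and zoned handling in that order, then
 * completes the command according to the configured irqmode.
 */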
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
				    sector_t nr_sectors, enum req_opf op)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		sts = null_handle_throttled(cmd);
		if (sts != BLK_STS_OK)
			return sts;
	}

	if (op == REQ_OP_FLUSH) {
		cmd->error = errno_to_blk_status(null_handle_flush(nullb));
		goto out;
	}

	if (nullb->dev->badblocks.shift != -1) {
		cmd->error = null_handle_badblocks(cmd, sector, nr_sectors);
		if (cmd->error != BLK_STS_OK)
			goto out;
	}

	if (dev->memory_backed)
		cmd->error = null_handle_memory_backed(cmd, op);

	if (!cmd->error && dev->zoned)
		cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);

out:
	nullb_complete_cmd(cmd);
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

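/*
 * Map the submitting CPU to one of the nullb queues by dividing the CPU
 * id space evenly across nr_queues (bio mode only; blk-mq picks its own
 * hardware context).
 */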
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector;
	sector_t nr_sectors = bio_sectors(bio);
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
	return BLK_QC_T_NONE;
}

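/*
 * Fault injection hooks: when the "timeout" or "requeue" module parameters
 * are set (parsed by the fault-injection core, typically as
 * "<interval>,<probability>,<space>,<times>"), requests are randomly timed
 * out or requeued via should_fail().
 */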
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;
	sector_t nr_sectors = blk_rq_sectors(bd->rq);
	sector_t sector = blk_rq_pos(bd->rq);

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path and the
		 * driver-driven requeue path.
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
	.report_zones =	null_zone_report,
};
1486
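/*
 * Per-queue init shared by both paths. The waitqueue and depth are only
 * consulted by the bio path's hand-rolled tag allocation; blk-mq carries
 * its commands in the request pdu (see cmd_size in null_init_tag_set()).
 */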
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

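/*
 * Bind each active hardware context to its nullb_queue so the per-queue
 * state can be reached from hctx->driver_data at dispatch time.
 */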
static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

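/*
 * Bio mode manages tags by hand: a preallocated command array plus a
 * one-bit-per-command bitmap that stands in for blk-mq tag allocation.
 */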
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

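/*
 * Bio mode only: the blk-mq path gets its queues bound later via
 * null_init_queues() once the hardware contexts exist.
 */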
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

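/*
 * Allocate and publish the gendisk. For zoned devices the zones are
 * revalidated before add_disk() so the zone tracking data is in place by
 * the time the disk becomes visible.
 */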
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = blk_revalidate_disk_zones(disk);

		if (ret != 0) {
			put_disk(disk);	/* don't leak the not-yet-added disk */
			return ret;
		}
	}

	add_disk(disk);
	return 0;
}

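/*
 * A tag set is either the module-wide one (shared_tags) or per device;
 * a NULL nullb means we are sizing the global set from the g_* module
 * parameters.
 */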
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

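/*
 * Clamp user-supplied configuration to something usable before bringing
 * the device up: block size to a 512-byte multiple in [512, 4096], queue
 * counts to the online topology, and feature combinations that cannot work
 * (cache without memory backing, throttling on the bio path) turned off.
 */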
static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Memory backing allocates pages at I/O time, so the queue must block */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless without memory backing */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* cannot stop a bio-based queue, so no throttling there */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

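/*
 * Fault injection setup. The "timeout" and "requeue" module parameter
 * strings use the standard fault-injection tuple parsed by
 * setup_fault_attr() (interval,probability,space,times); an empty string
 * leaves that fault disabled.
 */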
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

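/*
 * Bring one device online: validate its configuration, set up the queues
 * and request queue for the chosen queue_mode, apply the throttling, cache,
 * and zoned options, then register the gendisk.
 */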
static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault()) {
			/* don't return success when fault setup failed */
			rv = -EINVAL;
			goto out_cleanup_queues;
		}

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
		blk_queue_required_elevator_features(nullb->q,
						ELEVATOR_F_ZBD_SEQ_WRITE);
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

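/*
 * Module init: sanity-check the global parameters, register the configfs
 * subsystem and block major, then instantiate nr_devices devices up front.
 */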
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("invalid block size\n");
		pr_warn("defaulting block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("zone_size must be a power of two\n");
		return -EINVAL;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("legacy IO path no longer available\n");
		return -EINVAL;
	}
	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");
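
/*
 * Example usage, a sketch assuming the module parameters declared earlier
 * in this file (nr_devices, gb, bs, queue_mode):
 *
 *   modprobe null_blk nr_devices=2 gb=4 bs=4096 queue_mode=2
 *
 * creates /dev/nullb0 and /dev/nullb1 as 4GB multiqueue null devices with
 * a 4096-byte block size; further devices can be created at runtime through
 * the "nullb" configfs subsystem registered in null_init().
 */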