// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

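/*
 * Sector math used throughout the memory store: a page holds
 * PAGE_SIZE / 512 sectors (PAGE_SECTORS), so a sector maps to a radix-tree
 * page index via "sector >> PAGE_SECTORS_SHIFT" and to a bit inside that
 * page's bitmap via "sector & SECTOR_MASK".
 */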
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

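/*
 * The bandwidth throttle refills its budget TICKS_PER_SEC times a second,
 * so a device limited to "mbps" MB/s may transfer at most
 * mbps * 2^20 / TICKS_PER_SEC bytes per tick.
 */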
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
 * UP: Device is currently on and visible in userspace.
 * THROTTLED: Device is being throttled.
 * CACHE: Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page: The page holding the data.
 * @bitmap: The bitmap records which sectors in the page have data.
 *	Each bit represents one block size. For example, sector 8
 *	will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space.
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, 0444);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, 0444);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)					\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count)				\
{									\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
		return -EBUSY;						\
	return nullb_device_##TYPE##_attr_store(			\
			&to_nullb_device(item)->NAME, page, count);	\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

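/*
 * A minimal usage sketch (assuming configfs is mounted at the usual
 * /sys/kernel/config):
 *
 *	mkdir /sys/kernel/config/nullb/nullb0
 *	echo 4096 > /sys/kernel/config/nullb/nullb0/blocksize
 *	echo 1 > /sys/kernel/config/nullb/nullb0/memory_backed
 *	echo 1 > /sys/kernel/config/nullb/nullb0/power
 *
 * Creating the directory allocates a nullb_device; writing 1 to "power"
 * calls null_add_dev() and a /dev/nullb<N> node appears. While the device
 * is powered on (NULLB_DEV_FL_CONFIGURED set), the macro-generated
 * attribute stores above return -EBUSY; only "power" and "badblocks"
 * stay writable.
 */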
static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

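/*
 * Tags for the bio-based queue mode are managed with a plain bitmap:
 * get_tag() scans for a zero bit and claims it with a locked
 * test-and-set, retrying on contention, while put_tag() releases the
 * bit and wakes any waiter sleeping in alloc_cmd().
 */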
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

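/*
 * Look up (or allocate) the backing page for @sector. The allocation path
 * drops nullb->lock around the GFP_NOIO allocation and radix-tree preload
 * and then re-takes it, which is why the function carries the
 * __releases/__acquires annotations and finishes with a fresh lookup.
 */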
static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

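/*
 * Evict cache pages in FREE_BATCH-sized rounds until at least @n more bytes
 * fit under the configured cache size (or the cache is empty). Pages already
 * locked by another flusher are skipped; everything else is marked
 * NULLB_PAGE_LOCK and written back through null_flush_cache_page().
 */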
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow page free.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * Skip any page that is already being flushed to disk by
		 * another thread.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

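/*
 * Copy @n bytes from @source into the device's backing store one block at
 * a time. Writes land in the write-back cache when it is active; FUA writes
 * bypass it and invalidate any cached copy of the sector.
 */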
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

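/*
 * Discard simply drops the backing pages (and any cached copies) for the
 * range; reads of discarded sectors then return zeroes via
 * copy_from_nullb(). A flush, in turn, forces the entire write-back cache
 * out to the data tree.
 */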
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     rq->cmd_flags & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}

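/*
 * Bandwidth throttling works like a token bucket: cur_bytes holds the
 * remaining byte budget for the current tick and bw_timer refills it.
 * When a request overdraws the budget, the hardware queues are stopped
 * and the request is bounced back with BLK_STS_DEV_RESOURCE.
 */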
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts = BLK_STS_OK;
	struct request *rq = cmd->rq;

	if (!hrtimer_active(&nullb->bw_timer))
		hrtimer_restart(&nullb->bw_timer);

	if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
		null_stop_queue(nullb);
		/* race with timer */
		if (atomic_long_read(&nullb->cur_bytes) > 0)
			null_restart_queue_async(nullb);
		/* requeue request */
		sts = BLK_STS_DEV_RESOURCE;
	}
	return sts;
}

static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
						 sector_t sector,
						 sector_t nr_sectors)
{
	struct badblocks *bb = &cmd->nq->dev->badblocks;
	sector_t first_bad;
	int bad_sectors;

	if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
						     enum req_opf op)
{
	struct nullb_device *dev = cmd->nq->dev;
	int err;

	if (dev->queue_mode == NULL_Q_BIO)
		err = null_handle_bio(cmd);
	else
		err = null_handle_rq(cmd);

	return errno_to_blk_status(err);
}

static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (cmd->nq->dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

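/*
 * Top-level command handler. Order matters: the bandwidth throttle may
 * bounce the command before any work is done, a flush drains the cache and
 * completes, bad blocks fail the command, and only then do the
 * memory-backed store and the zoned-model hook see the I/O.
 */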
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
				    sector_t nr_sectors, enum req_opf op)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		sts = null_handle_throttled(cmd);
		if (sts != BLK_STS_OK)
			return sts;
	}

	if (op == REQ_OP_FLUSH) {
		cmd->error = errno_to_blk_status(null_handle_flush(nullb));
		goto out;
	}

	if (nullb->dev->badblocks.shift != -1) {
		cmd->error = null_handle_badblocks(cmd, sector, nr_sectors);
		if (cmd->error != BLK_STS_OK)
			goto out;
	}

	if (dev->memory_backed)
		cmd->error = null_handle_memory_backed(cmd, op);

	if (!cmd->error && dev->zoned)
		cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);

out:
	nullb_complete_cmd(cmd);
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

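/*
 * Map the submitting CPU to a queue by dividing the CPU id by
 * ceil(nr_cpu_ids / nr_queues), so consecutive CPU ranges share a queue.
 */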
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector;
	sector_t nr_sectors = bio_sectors(bio);
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
	return BLK_QC_T_NONE;
}

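/*
 * Fault injection: with CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION enabled and
 * the "timeout" or "requeue" module parameter strings set, should_fail()
 * decides per request whether to let it time out or bounce it back through
 * the requeue path in null_queue_rq().
 */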
Jens Axboe93b57042018-01-10 09:06:23 -07001298static bool should_timeout_request(struct request *rq)
1299{
Arnd Bergmann33f782c2018-01-11 11:31:25 +01001300#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
Jens Axboe93b57042018-01-10 09:06:23 -07001301 if (g_timeout_str[0])
1302 return should_fail(&null_timeout_attr, 1);
Arnd Bergmann33f782c2018-01-11 11:31:25 +01001303#endif
Jens Axboe24941b92018-02-28 09:18:57 -07001304 return false;
1305}
Jens Axboe93b57042018-01-10 09:06:23 -07001306
Jens Axboe24941b92018-02-28 09:18:57 -07001307static bool should_requeue_request(struct request *rq)
1308{
1309#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1310 if (g_requeue_str[0])
1311 return should_fail(&null_requeue_attr, 1);
1312#endif
Jens Axboe93b57042018-01-10 09:06:23 -07001313 return false;
1314}
1315
Jens Axboe5448aca2018-01-09 12:47:24 -07001316static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
1317{
André Almeida4e47ee82019-09-11 11:46:35 -03001318 pr_info("null_blk: rq %p timed out\n", rq);
Christoph Hellwig0df0bb02018-05-29 15:52:33 +02001319 blk_mq_complete_request(rq);
1320 return BLK_EH_DONE;
Jens Axboe5448aca2018-01-09 12:47:24 -07001321}
1322
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001323static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
Jens Axboe74c45052014-10-29 11:14:52 -06001324 const struct blk_mq_queue_data *bd)
Jens Axboef2298c02013-10-25 11:52:25 +01001325{
Jens Axboe74c45052014-10-29 11:14:52 -06001326 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
Shaohua Li2984c862017-08-14 15:04:52 -07001327 struct nullb_queue *nq = hctx->driver_data;
Chaitanya Kulkarnid4b186e2019-08-22 21:45:14 -07001328 sector_t nr_sectors = blk_rq_sectors(bd->rq);
1329 sector_t sector = blk_rq_pos(bd->rq);
Jens Axboef2298c02013-10-25 11:52:25 +01001330
Jens Axboedb5bcf82017-03-30 13:44:26 -06001331 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1332
Shaohua Li2984c862017-08-14 15:04:52 -07001333 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
Paolo Valente3c395a92015-12-01 11:48:17 +01001334 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1335 cmd->timer.function = null_cmd_timer_expired;
1336 }
Jens Axboe74c45052014-10-29 11:14:52 -06001337 cmd->rq = bd->rq;
Shaohua Li2984c862017-08-14 15:04:52 -07001338 cmd->nq = nq;
Jens Axboef2298c02013-10-25 11:52:25 +01001339
Jens Axboe74c45052014-10-29 11:14:52 -06001340 blk_mq_start_request(bd->rq);
Christoph Hellwige2490072014-09-13 16:40:09 -07001341
Jens Axboe24941b92018-02-28 09:18:57 -07001342 if (should_requeue_request(bd->rq)) {
1343 /*
1344 * Alternate between hitting the core BUSY path, and the
1345 * driver driven requeue path
1346 */
1347 nq->requeue_selection++;
1348 if (nq->requeue_selection & 1)
1349 return BLK_STS_RESOURCE;
1350 else {
1351 blk_mq_requeue_request(bd->rq, true);
1352 return BLK_STS_OK;
1353 }
1354 }
1355 if (should_timeout_request(bd->rq))
1356 return BLK_STS_OK;
Jens Axboe93b57042018-01-10 09:06:23 -07001357
Chaitanya Kulkarnid4b186e2019-08-22 21:45:14 -07001358 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
Jens Axboef2298c02013-10-25 11:52:25 +01001359}
1360
Eric Biggersf363b082017-03-30 13:39:16 -07001361static const struct blk_mq_ops null_mq_ops = {
Jens Axboef2298c02013-10-25 11:52:25 +01001362 .queue_rq = null_queue_rq,
Christoph Hellwig49f66132018-11-10 09:30:45 +01001363 .complete = null_complete_rq,
Jens Axboe5448aca2018-01-09 12:47:24 -07001364 .timeout = null_timeout_rq,
Jens Axboef2298c02013-10-25 11:52:25 +01001365};
1366
Matias Bjørlingde65d2d2015-08-31 14:17:18 +02001367static void cleanup_queue(struct nullb_queue *nq)
1368{
1369 kfree(nq->tag_map);
1370 kfree(nq->cmds);
1371}
1372
1373static void cleanup_queues(struct nullb *nullb)
1374{
1375 int i;
1376
1377 for (i = 0; i < nullb->nr_queues; i++)
1378 cleanup_queue(&nullb->queues[i]);
1379
1380 kfree(nullb->queues);
1381}
1382
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001383static void null_del_dev(struct nullb *nullb)
1384{
Shaohua Li2984c862017-08-14 15:04:52 -07001385 struct nullb_device *dev = nullb->dev;
1386
Shaohua Li94bc02e2017-08-14 15:04:55 -07001387 ida_simple_remove(&nullb_indexes, nullb->index);
1388
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001389 list_del_init(&nullb->list);
1390
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001391 del_gendisk(nullb->disk);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001392
1393 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1394 hrtimer_cancel(&nullb->bw_timer);
1395 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1396 null_restart_queue_async(nullb);
1397 }
1398
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001399 blk_cleanup_queue(nullb->q);
Shaohua Li2984c862017-08-14 15:04:52 -07001400 if (dev->queue_mode == NULL_Q_MQ &&
1401 nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001402 blk_mq_free_tag_set(nullb->tag_set);
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001403 put_disk(nullb->disk);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001404 cleanup_queues(nullb);
Shaohua Lideb78b42017-08-14 15:04:59 -07001405 if (null_cache_active(nullb))
1406 null_free_device_storage(nullb->dev, true);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001407 kfree(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001408 dev->nullb = NULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001409}
1410
Shaohua Li306eb6b2017-08-14 15:04:57 -07001411static void null_config_discard(struct nullb *nullb)
1412{
1413 if (nullb->dev->discard == false)
1414 return;
1415 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1416 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1417 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
Bart Van Assche8b904b52018-03-07 17:10:10 -08001418 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001419}
1420
Jens Axboef2298c02013-10-25 11:52:25 +01001421static int null_open(struct block_device *bdev, fmode_t mode)
1422{
1423 return 0;
1424}
1425
1426static void null_release(struct gendisk *disk, fmode_t mode)
1427{
1428}
1429
1430static const struct block_device_operations null_fops = {
1431 .owner = THIS_MODULE,
1432 .open = null_open,
1433 .release = null_release,
Christoph Hellwige76239a2018-10-12 19:08:49 +09001434 .report_zones = null_zone_report,
Jens Axboef2298c02013-10-25 11:52:25 +01001435};
1436
Jens Axboe82f402f2017-06-20 14:22:01 -06001437static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1438{
1439 BUG_ON(!nullb);
1440 BUG_ON(!nq);
1441
1442 init_waitqueue_head(&nq->wait);
1443 nq->queue_depth = nullb->queue_depth;
Shaohua Li2984c862017-08-14 15:04:52 -07001444 nq->dev = nullb->dev;
Jens Axboe82f402f2017-06-20 14:22:01 -06001445}
1446
1447static void null_init_queues(struct nullb *nullb)
1448{
1449 struct request_queue *q = nullb->q;
1450 struct blk_mq_hw_ctx *hctx;
1451 struct nullb_queue *nq;
1452 int i;
1453
1454 queue_for_each_hw_ctx(q, hctx, i) {
1455 if (!hctx->nr_ctx || !hctx->tags)
1456 continue;
1457 nq = &nullb->queues[i];
1458 hctx->driver_data = nq;
1459 null_init_queue(nullb, nq);
1460 nullb->nr_queues++;
1461 }
1462}
1463
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

Jens Axboef2298c02013-10-25 11:52:25 +01001490static int setup_queues(struct nullb *nullb)
1491{
Kees Cook6396bb22018-06-12 14:03:40 -07001492 nullb->queues = kcalloc(nullb->dev->submit_queues,
1493 sizeof(struct nullb_queue),
1494 GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001495 if (!nullb->queues)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001496 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001497
Shaohua Li2984c862017-08-14 15:04:52 -07001498 nullb->queue_depth = nullb->dev->hw_queue_depth;
Jens Axboef2298c02013-10-25 11:52:25 +01001499
Matias Bjorling2d263a782013-12-18 13:41:43 +01001500 return 0;
1501}
1502
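/*
 * Used in bio mode only: there are no blk-mq hardware contexts there,
 * so the driver queues are initialized directly rather than through
 * null_init_queues().
 */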
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

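/*
 * Allocate and publish the gendisk. The configured size is given in MB
 * and converted to 512-byte sectors for set_capacity(). Zoned devices
 * must have their zones revalidated before the disk goes live.
 */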
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = blk_revalidate_disk_zones(disk);

		if (ret != 0) {
			/* drop the reference from alloc_disk_node() */
			put_disk(disk);
			return ret;
		}
	}

	add_disk(disk);
	return 0;
}

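/*
 * Fill in and allocate a blk-mq tag set. With a NULL nullb this sets up
 * the module-wide set used by shared_tags, driven by the global g_*
 * parameters; otherwise the per-device configuration is used.
 */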
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

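/*
 * Sanitize a device configuration before it is applied: clamp the block
 * size to 512..4096, bound the submit queue count by the topology, and
 * drop feature combinations that make no sense (a cache without memory
 * backing, or throttling on a bio-based queue).
 */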
static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* memory backing allocates at IO time, so force blocking mode */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* the cache only makes sense with memory backing */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* a bio-based queue cannot be stopped, so throttling is unsupported */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

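/*
 * Parse a fault-injection specification passed as a module parameter.
 * An empty string leaves the attribute at its defaults, which injects
 * no faults at all.
 */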
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

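/*
 * Bring up one nullb instance from a validated configuration: allocate
 * the device, set up queues and (in MQ mode) a tag set, apply the
 * optional throttling/cache/zoned features, then register the gendisk.
 * The error labels below unwind these steps in reverse order.
 */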
static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault()) {
			/* don't return success when the fault string is bad */
			rv = -EINVAL;
			goto out_cleanup_queues;
		}

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

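	/*
	 * Arm the bandwidth timer before the disk becomes visible so the
	 * configured MB/s budget is enforced from the very first request.
	 */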
	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

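	/*
	 * Zoned mode: expose host-managed zones of zone_size_sects sectors
	 * and require an elevator that keeps writes within a zone ordered.
	 */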
	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
		blk_queue_required_elevator_features(nullb->q,
						ELEVATOR_F_ZBD_SEQ_WRITE);
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

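/*
 * Module entry point: validate the global parameters, register the
 * configfs subsystem and the block major, then create nr_devices
 * instances. A minimal usage sketch, assuming the usual null_blk
 * module parameter names declared earlier in this file:
 *
 *   modprobe null_blk nr_devices=2 queue_mode=2 bs=4096 submit_queues=4
 *
 * which creates /dev/nullb0 and /dev/nullb1 as multi-queue devices
 * with a 4096-byte block size.
 */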
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("null_blk: invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("null_blk: legacy IO path no longer available\n");
		return -EINVAL;
	}
	if (!nr_devices) {
		pr_err("null_blk: invalid number of devices\n");
		return -EINVAL;
	}
	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null_blk: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

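/*
 * Tear down every remaining device under the global lock, then free the
 * shared tag set last since live devices may still have referenced it.
 */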
static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");