/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

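/*
 * Per-tick byte budget for bandwidth throttling. With TICKS_PER_SEC = 50
 * the bandwidth timer fires every 20ms, so e.g. mbps = 100 gives
 * (1 << 20) / 50 * 100 ~= 2MiB of budget per tick, which the timer uses
 * to refill nullb->cur_bytes (roughly 100MiB/s overall).
 */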
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
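/*
 * Bitmap layout, worked through for 4KiB pages and 512-byte sectors:
 * MAP_SZ is (4096 >> 9) + 2 = 10, bits 0-7 track the eight sectors in
 * the page, bit 8 is NULLB_PAGE_FREE and bit 9 is NULLB_PAGE_LOCK.
 */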

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when the block device is zoned. Must be a power of two. Default: 256");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when the block device is zoned. Default: 0");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);

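/*
 * NULLB_DEVICE_ATTR(size, ulong), for example, expands to
 * nullb_device_size_show()/nullb_device_size_store() operating on
 * dev->size; stores are rejected with -EBUSY once the device has been
 * configured (powered on), so attributes are only tunable up front.
 */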
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

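/*
 * Writing 1 to the power attribute instantiates the block device
 * (null_add_dev()) and marks it configured; writing 0 tears it down
 * again. E.g., from a configfs item directory:
 *   echo 1 > power	# creates the disk
 *   echo 0 > power	# removes it
 */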
static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

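/*
 * The badblocks attribute takes an inclusive sector range prefixed with
 * '+' (mark bad) or '-' (clear). For example:
 *   echo "+0-7" > badblocks	# sectors 0..7 now fail with an I/O error
 *   echo "-0-7" > badblocks	# and are healthy again
 */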
static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

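/*
 * A minimal configfs session against this subsystem (assuming configfs
 * is mounted at /sys/kernel/config):
 *
 *   mkdir /sys/kernel/config/nullb/a
 *   echo 1 > /sys/kernel/config/nullb/a/memory_backed
 *   echo 4096 > /sys/kernel/config/nullb/a/blocksize
 *   echo 1 > /sys/kernel/config/nullb/a/power
 *
 * Powering on allocates the next free index (null_add_dev(), later in
 * this file), so the disk appears as /dev/nullb<index>; the item's
 * "index" attribute reports which one it got.
 */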
static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

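/*
 * Lock-free tag allocation for the bio path: find_first_zero_bit() alone
 * is racy, so the candidate bit is claimed with test_and_set_bit_lock()
 * and the scan retries on contention. -1U means the queue is full and
 * the caller has to wait (see alloc_cmd()).
 */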
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

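/*
 * Page insertion drops nullb->lock around the GFP_NOIO allocations
 * (hence the __releases/__acquires annotations): both the page and the
 * radix-tree preload may sleep. Because the lock is released, another
 * writer may have inserted the page first, so the failure path finishes
 * with a fresh lookup under the re-acquired lock.
 */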
static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

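/*
 * Reclaim cache space until @n more bytes fit under dev->cache_size:
 * each round gang-looks-up up to FREE_BATCH cache pages starting at
 * cache_flush_pos, locks the ones nobody else is flushing, and writes
 * them back through null_flush_cache_page(). A round that makes no
 * progress briefly drops nullb->lock so competing flushers can finish.
 */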
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow page free.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found a page which is being flushed to disk by other
		 * threads.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

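/*
 * Copy @n bytes from @source into the backing store one block at a time.
 * With the write-back cache active, regular writes land in cache pages
 * (making room first via null_make_cache_space()); FUA writes bypass the
 * cache and invalidate any stale cached copy of the sector.
 */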
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio_op(bio) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}

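/*
 * Central dispatch for every command: charge the bandwidth budget (and
 * requeue with BLK_STS_DEV_RESOURCE if it is exhausted), fail I/O that
 * hits a configured bad-block range, service the data through the
 * memory store when memory_backed is set, apply zoned-device rules,
 * then complete inline, via softirq, or from an hrtimer depending on
 * the configured irqmode.
 */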
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			/* requeue request */
			return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);

	if (!cmd->error && dev->zoned) {
		sector_t sector;
		unsigned int nr_sectors;
		int op;

		if (dev->queue_mode == NULL_Q_BIO) {
			op = bio_op(cmd->bio);
			sector = cmd->bio->bi_iter.bi_sector;
			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
		} else {
			op = req_op(cmd->rq);
			sector = blk_rq_pos(cmd->rq);
			nr_sectors = blk_rq_sectors(cmd->rq);
		}

		if (op == REQ_OP_WRITE)
			null_zone_write(cmd, sector, nr_sectors);
		else if (op == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd, sector);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

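/*
 * Map the submitting CPU onto one of nr_queues by even division: with
 * e.g. 8 possible CPUs and 2 queues, CPUs 0-3 use queue 0 and CPUs 4-7
 * use queue 1. Only the bio path needs this; blk-mq does its own hctx
 * mapping.
 */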
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

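/*
 * Fault-injection hooks, compiled in only with
 * CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION. The timeout/requeue module
 * parameters take the standard fault_attr string
 * "<interval>,<probability>,<space>,<times>" (parsed at init, outside
 * this excerpt), e.g. loading with timeout="1,100,0,-1" should make
 * every request time out.
 */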
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner		= THIS_MODULE,
	.open		= null_open,
	.release	= null_release,
	.report_zones	= null_zone_report,
};

Jens Axboe82f402f2017-06-20 14:22:01 -06001429static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1430{
1431 BUG_ON(!nullb);
1432 BUG_ON(!nq);
1433
1434 init_waitqueue_head(&nq->wait);
1435 nq->queue_depth = nullb->queue_depth;
Shaohua Li2984c862017-08-14 15:04:52 -07001436 nq->dev = nullb->dev;
Jens Axboe82f402f2017-06-20 14:22:01 -06001437}
1438
1439static void null_init_queues(struct nullb *nullb)
1440{
1441 struct request_queue *q = nullb->q;
1442 struct blk_mq_hw_ctx *hctx;
1443 struct nullb_queue *nq;
1444 int i;
1445
1446 queue_for_each_hw_ctx(q, hctx, i) {
1447 if (!hctx->nr_ctx || !hctx->tags)
1448 continue;
1449 nq = &nullb->queues[i];
1450 hctx->driver_data = nq;
1451 null_init_queue(nullb, nq);
1452 nullb->nr_queues++;
1453 }
1454}
1455
Jens Axboef2298c02013-10-25 11:52:25 +01001456static int setup_commands(struct nullb_queue *nq)
1457{
1458 struct nullb_cmd *cmd;
1459 int i, tag_size;
1460
Kees Cook6396bb22018-06-12 14:03:40 -07001461 nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001462 if (!nq->cmds)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001463 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001464
1465 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
Kees Cook6396bb22018-06-12 14:03:40 -07001466 nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001467 if (!nq->tag_map) {
1468 kfree(nq->cmds);
Matias Bjorling2d263a782013-12-18 13:41:43 +01001469 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001470 }
1471
1472 for (i = 0; i < nq->queue_depth; i++) {
1473 cmd = &nq->cmds[i];
1474 INIT_LIST_HEAD(&cmd->list);
1475 cmd->ll_list.next = NULL;
1476 cmd->tag = -1U;
1477 }
1478
1479 return 0;
1480}
1481
Jens Axboef2298c02013-10-25 11:52:25 +01001482static int setup_queues(struct nullb *nullb)
1483{
Kees Cook6396bb22018-06-12 14:03:40 -07001484 nullb->queues = kcalloc(nullb->dev->submit_queues,
1485 sizeof(struct nullb_queue),
1486 GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001487 if (!nullb->queues)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001488 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001489
1490 nullb->nr_queues = 0;
Shaohua Li2984c862017-08-14 15:04:52 -07001491 nullb->queue_depth = nullb->dev->hw_queue_depth;
Jens Axboef2298c02013-10-25 11:52:25 +01001492
Matias Bjorling2d263a782013-12-18 13:41:43 +01001493 return 0;
1494}
1495
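/*
 * BIO-mode devices have no blk-mq hardware contexts, so every submit
 * queue is initialized up front along with its command array.
 */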
1496static int init_driver_queues(struct nullb *nullb)
1497{
1498 struct nullb_queue *nq;
1499 int i, ret = 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001500
Shaohua Li2984c862017-08-14 15:04:52 -07001501 for (i = 0; i < nullb->dev->submit_queues; i++) {
Jens Axboef2298c02013-10-25 11:52:25 +01001502 nq = &nullb->queues[i];
Matias Bjorling2d263a782013-12-18 13:41:43 +01001503
1504 null_init_queue(nullb, nq);
1505
1506 ret = setup_commands(nq);
1507 if (ret)
Jan Kara31f96902014-10-22 15:34:21 +02001508 return ret;
Jens Axboef2298c02013-10-25 11:52:25 +01001509 nullb->nr_queues++;
1510 }
Matias Bjorling2d263a782013-12-18 13:41:43 +01001511 return 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001512}
1513
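/*
 * Allocate and register the gendisk. dev->size is given in megabytes,
 * hence the conversion to 512-byte sectors below; zoned devices also
 * get their zone layout revalidated before the disk goes live.
 */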
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001514static int null_gendisk_register(struct nullb *nullb)
Jens Axboef2298c02013-10-25 11:52:25 +01001515{
1516 struct gendisk *disk;
Jens Axboef2298c02013-10-25 11:52:25 +01001517 sector_t size;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001518
Shaohua Li2984c862017-08-14 15:04:52 -07001519 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001520 if (!disk)
1521 return -ENOMEM;
Shaohua Li2984c862017-08-14 15:04:52 -07001522 size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001523 set_capacity(disk, size >> 9);
1524
1525 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
1526 disk->major = null_major;
1527 disk->first_minor = nullb->index;
1528 disk->fops = &null_fops;
1529 disk->private_data = nullb;
1530 disk->queue = nullb->q;
1531 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1532
Damien Le Moalbf505452018-10-12 19:08:50 +09001533 if (nullb->dev->zoned) {
1534 int ret = blk_revalidate_disk_zones(disk);
1535
1536 if (ret != 0)
1537 return ret;
1538 }
1539
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001540 add_disk(disk);
1541 return 0;
1542}
1543
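/*
 * Initialize a tag set either for one device (nullb != NULL) or for the
 * set shared by all devices created with shared_tags, in which case the
 * global g_* module parameters supply the values. Memory-backed devices
 * need BLK_MQ_F_BLOCKING because serving I/O may sleep on page
 * allocation.
 */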
Shaohua Li2984c862017-08-14 15:04:52 -07001544static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001545{
1546 set->ops = &null_mq_ops;
Shaohua Li2984c862017-08-14 15:04:52 -07001547 set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
1548 g_submit_queues;
1549 set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
1550 g_hw_queue_depth;
1551 set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
Jens Axboe82f402f2017-06-20 14:22:01 -06001552 set->cmd_size = sizeof(struct nullb_cmd);
1553 set->flags = BLK_MQ_F_SHOULD_MERGE;
weiping zhangb3cffc32017-09-30 09:49:21 +08001554 if (g_no_sched)
1555 set->flags |= BLK_MQ_F_NO_SCHED;
Jens Axboe82f402f2017-06-20 14:22:01 -06001556 set->driver_data = NULL;
1557
Shaohua Li0d06a422017-08-25 13:46:25 -07001558 if ((nullb && nullb->dev->blocking) || g_blocking)
Jens Axboe82f402f2017-06-20 14:22:01 -06001559 set->flags |= BLK_MQ_F_BLOCKING;
1560
1561 return blk_mq_alloc_tag_set(set);
1562}
1563
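/*
 * Clamp user-supplied knobs into a usable range before the device is
 * started: the block size becomes a 512-byte multiple in [512, 4096],
 * the submit queue count is bounded by online nodes or CPUs, and
 * meaningless combinations (cache without memory backing, throttling in
 * BIO mode) are silently dropped.
 */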
Shaohua Licedcafa2017-08-14 15:04:54 -07001564static void null_validate_conf(struct nullb_device *dev)
1565{
1566 dev->blocksize = round_down(dev->blocksize, 512);
1567 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
Shaohua Licedcafa2017-08-14 15:04:54 -07001568
1569 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
1570 if (dev->submit_queues != nr_online_nodes)
1571 dev->submit_queues = nr_online_nodes;
1572 } else if (dev->submit_queues > nr_cpu_ids)
1573 dev->submit_queues = nr_cpu_ids;
1574 else if (dev->submit_queues == 0)
1575 dev->submit_queues = 1;
1576
1577 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
1578 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
Shaohua Li5bcd0e02017-08-14 15:04:56 -07001579
1580	/* memory-backed I/O allocates pages and may sleep, so force blocking */
1581	if (dev->memory_backed)
1582		dev->blocking = true;
Shaohua Lideb78b42017-08-14 15:04:59 -07001583	else /* a write-back cache is meaningless without memory backing */
1584		dev->cache_size = 0;
1585 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
1586 dev->cache_size);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001587 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
1588	/* a BIO-based queue cannot be stopped, so bandwidth throttling is unsupported */
1589 if (dev->queue_mode == NULL_Q_BIO)
1590 dev->mbps = 0;
Shaohua Licedcafa2017-08-14 15:04:54 -07001591}
1592
Jens Axboe24941b92018-02-28 09:18:57 -07001593#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1594static bool __null_setup_fault(struct fault_attr *attr, char *str)
1595{
1596 if (!str[0])
1597 return true;
1598
1599 if (!setup_fault_attr(attr, str))
1600 return false;
1601
1602 attr->verbose = 0;
1603 return true;
1604}
1605#endif
1606
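/*
 * Parse the optional fault-injection strings. A usage sketch, assuming
 * the standard lib/fault-inject syntax of
 * "<interval>,<probability>,<space>,<times>" and that g_timeout_str and
 * g_requeue_str map to the timeout= and requeue= module parameters:
 *
 *	modprobe null_blk timeout="1,100,0,-1"
 *
 * would make every request time out, indefinitely.
 */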
Jens Axboe93b57042018-01-10 09:06:23 -07001607static bool null_setup_fault(void)
1608{
Arnd Bergmann33f782c2018-01-11 11:31:25 +01001609#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
Jens Axboe24941b92018-02-28 09:18:57 -07001610 if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
Jens Axboe93b57042018-01-10 09:06:23 -07001611 return false;
Jens Axboe24941b92018-02-28 09:18:57 -07001612 if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
1613 return false;
Arnd Bergmann33f782c2018-01-11 11:31:25 +01001614#endif
Jens Axboe93b57042018-01-10 09:06:23 -07001615 return true;
1616}
1617
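/*
 * Bring up one device: allocate the nullb, set up the submit queues,
 * create a request queue for the configured queue_mode, apply the
 * optional throttling/cache/zoned settings, and finally register the
 * gendisk. The error labels below unwind in reverse order of
 * construction.
 */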
Shaohua Li2984c862017-08-14 15:04:52 -07001618static int null_add_dev(struct nullb_device *dev)
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001619{
1620 struct nullb *nullb;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001621 int rv;
Jens Axboef2298c02013-10-25 11:52:25 +01001622
Shaohua Licedcafa2017-08-14 15:04:54 -07001623 null_validate_conf(dev);
1624
Shaohua Li2984c862017-08-14 15:04:52 -07001625 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001626 if (!nullb) {
1627 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001628 goto out;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001629 }
Shaohua Li2984c862017-08-14 15:04:52 -07001630 nullb->dev = dev;
1631 dev->nullb = nullb;
Jens Axboef2298c02013-10-25 11:52:25 +01001632
1633 spin_lock_init(&nullb->lock);
1634
Robert Elliottdc501dc2014-09-02 11:38:49 -05001635 rv = setup_queues(nullb);
1636 if (rv)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001637 goto out_free_nullb;
Jens Axboef2298c02013-10-25 11:52:25 +01001638
Shaohua Li2984c862017-08-14 15:04:52 -07001639 if (dev->queue_mode == NULL_Q_MQ) {
Jens Axboe82f402f2017-06-20 14:22:01 -06001640 if (shared_tags) {
1641 nullb->tag_set = &tag_set;
1642 rv = 0;
1643 } else {
1644 nullb->tag_set = &nullb->__tag_set;
Shaohua Li2984c862017-08-14 15:04:52 -07001645 rv = null_init_tag_set(nullb, nullb->tag_set);
Jens Axboe82f402f2017-06-20 14:22:01 -06001646 }
Jens Axboef2298c02013-10-25 11:52:25 +01001647
Robert Elliottdc501dc2014-09-02 11:38:49 -05001648 if (rv)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001649 goto out_cleanup_queues;
Jens Axboef2298c02013-10-25 11:52:25 +01001650
Jens Axboe93b57042018-01-10 09:06:23 -07001651		if (!null_setup_fault()) {
1652			rv = -EINVAL;	/* rv is still 0 here; don't report success */
			goto out_cleanup_queues;
		}
1653
Jens Axboe5448aca2018-01-09 12:47:24 -07001654 nullb->tag_set->timeout = 5 * HZ;
Jens Axboe82f402f2017-06-20 14:22:01 -06001655 nullb->q = blk_mq_init_queue(nullb->tag_set);
Ming Lei35b489d2015-01-02 14:25:27 +00001656 if (IS_ERR(nullb->q)) {
Robert Elliottdc501dc2014-09-02 11:38:49 -05001657 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001658 goto out_cleanup_tags;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001659 }
Jens Axboe82f402f2017-06-20 14:22:01 -06001660 null_init_queues(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001661 } else if (dev->queue_mode == NULL_Q_BIO) {
Christoph Hellwig6d469642018-11-14 17:02:18 +01001662 nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001663 if (!nullb->q) {
1664 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001665 goto out_cleanup_queues;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001666 }
Jens Axboef2298c02013-10-25 11:52:25 +01001667 blk_queue_make_request(nullb->q, null_queue_bio);
Jan Kara31f96902014-10-22 15:34:21 +02001668 rv = init_driver_queues(nullb);
1669 if (rv)
1670 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001671 }
1672
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001673 if (dev->mbps) {
1674 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
1675 nullb_setup_bwtimer(nullb);
1676 }
1677
Shaohua Lideb78b42017-08-14 15:04:59 -07001678 if (dev->cache_size > 0) {
1679 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
1680 blk_queue_write_cache(nullb->q, true, true);
1681 blk_queue_flush_queueable(nullb->q, true);
1682 }
1683
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02001684 if (dev->zoned) {
1685 rv = null_zone_init(dev);
1686 if (rv)
1687 goto out_cleanup_blk_queue;
1688
1689 blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
1690 nullb->q->limits.zoned = BLK_ZONED_HM;
1691 }
1692
Jens Axboef2298c02013-10-25 11:52:25 +01001693 nullb->q->queuedata = nullb;
Bart Van Assche8b904b52018-03-07 17:10:10 -08001694 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
1695 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
Jens Axboef2298c02013-10-25 11:52:25 +01001696
Jens Axboef2298c02013-10-25 11:52:25 +01001697 mutex_lock(&lock);
Shaohua Li94bc02e2017-08-14 15:04:55 -07001698 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
Shaohua Licedcafa2017-08-14 15:04:54 -07001699 dev->index = nullb->index;
Jens Axboef2298c02013-10-25 11:52:25 +01001700 mutex_unlock(&lock);
1701
Shaohua Li2984c862017-08-14 15:04:52 -07001702 blk_queue_logical_block_size(nullb->q, dev->blocksize);
1703 blk_queue_physical_block_size(nullb->q, dev->blocksize);
Jens Axboef2298c02013-10-25 11:52:25 +01001704
Shaohua Li306eb6b2017-08-14 15:04:57 -07001705 null_config_discard(nullb);
Jens Axboef2298c02013-10-25 11:52:25 +01001706
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001707 sprintf(nullb->disk_name, "nullb%d", nullb->index);
1708
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001709 rv = null_gendisk_register(nullb);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001710 if (rv)
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02001711 goto out_cleanup_zone;
Jens Axboef2298c02013-10-25 11:52:25 +01001712
Matias Bjørlinga5143792016-02-11 14:49:13 +01001713 mutex_lock(&lock);
1714 list_add_tail(&nullb->list, &nullb_list);
1715 mutex_unlock(&lock);
Wenwei Tao3681c85d2016-03-05 00:27:04 +08001716
Jens Axboef2298c02013-10-25 11:52:25 +01001717 return 0;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02001718out_cleanup_zone:
1719 if (dev->zoned)
1720 null_zone_exit(dev);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001721out_cleanup_blk_queue:
1722 blk_cleanup_queue(nullb->q);
1723out_cleanup_tags:
Shaohua Li2984c862017-08-14 15:04:52 -07001724 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001725 blk_mq_free_tag_set(nullb->tag_set);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001726out_cleanup_queues:
1727 cleanup_queues(nullb);
1728out_free_nullb:
1729 kfree(nullb);
1730out:
Robert Elliottdc501dc2014-09-02 11:38:49 -05001731 return rv;
Jens Axboef2298c02013-10-25 11:52:25 +01001732}
1733
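/*
 * Module entry point: validate the global parameters, register the
 * configfs subsystem and the block major, then create nr_devices
 * devices. A minimal usage sketch (parameter and attribute names as in
 * the mainline null_blk documentation, so treat them as assumptions):
 *
 *	modprobe null_blk nr_devices=1 queue_mode=2 bs=4096 gb=16
 *
 * Additional devices can then be created at runtime through configfs,
 * assuming it is mounted at /sys/kernel/config:
 *
 *	mkdir /sys/kernel/config/nullb/nullb1
 *	echo 1 > /sys/kernel/config/nullb/nullb1/memory_backed
 *	echo 1 > /sys/kernel/config/nullb/nullb1/power
 */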
1734static int __init null_init(void)
1735{
Minfei Huangaf096e22015-12-08 13:47:34 -07001736 int ret = 0;
Jens Axboef2298c02013-10-25 11:52:25 +01001737 unsigned int i;
Minfei Huangaf096e22015-12-08 13:47:34 -07001738 struct nullb *nullb;
Shaohua Li2984c862017-08-14 15:04:52 -07001739 struct nullb_device *dev;
Jens Axboef2298c02013-10-25 11:52:25 +01001740
Shaohua Li2984c862017-08-14 15:04:52 -07001741 if (g_bs > PAGE_SIZE) {
Raghavendra K T9967d8a2014-01-21 16:59:59 +05301742 pr_warn("null_blk: invalid block size\n");
1743	pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
Shaohua Li2984c862017-08-14 15:04:52 -07001744 g_bs = PAGE_SIZE;
Raghavendra K T9967d8a2014-01-21 16:59:59 +05301745 }
Jens Axboef2298c02013-10-25 11:52:25 +01001746
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02001747 if (!is_power_of_2(g_zone_size)) {
1748 pr_err("null_blk: zone_size must be power-of-two\n");
1749 return -EINVAL;
1750 }
1751
Jens Axboee50b1e32018-10-11 17:58:17 -06001752 if (g_queue_mode == NULL_Q_RQ) {
1753 pr_err("null_blk: legacy IO path no longer available\n");
1754 return -EINVAL;
1755 }
Shaohua Li2984c862017-08-14 15:04:52 -07001756 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
1757 if (g_submit_queues != nr_online_nodes) {
weiping zhang558ab3002017-08-03 00:26:39 +08001758			pr_warn("null_blk: use_per_node_hctx needs one submit queue per node; setting submit_queues to %u.\n",
Matias Bjorlingd15ee6b2013-12-18 13:41:44 +01001759				nr_online_nodes);
Shaohua Li2984c862017-08-14 15:04:52 -07001760 g_submit_queues = nr_online_nodes;
Matias Bjørlingfc1bc352013-12-21 00:11:01 +01001761 }
Shaohua Li2984c862017-08-14 15:04:52 -07001762 } else if (g_submit_queues > nr_cpu_ids)
1763 g_submit_queues = nr_cpu_ids;
1764 else if (g_submit_queues <= 0)
1765 g_submit_queues = 1;
Jens Axboef2298c02013-10-25 11:52:25 +01001766
Shaohua Li2984c862017-08-14 15:04:52 -07001767 if (g_queue_mode == NULL_Q_MQ && shared_tags) {
1768 ret = null_init_tag_set(NULL, &tag_set);
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001769 if (ret)
1770 return ret;
1771 }
1772
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001773 config_group_init(&nullb_subsys.su_group);
1774 mutex_init(&nullb_subsys.su_mutex);
1775
1776 ret = configfs_register_subsystem(&nullb_subsys);
1777 if (ret)
1778 goto err_tagset;
1779
Jens Axboef2298c02013-10-25 11:52:25 +01001780 mutex_init(&lock);
1781
Jens Axboef2298c02013-10-25 11:52:25 +01001782 null_major = register_blkdev(0, "nullb");
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001783 if (null_major < 0) {
1784 ret = null_major;
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001785 goto err_conf;
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001786 }
Jens Axboef2298c02013-10-25 11:52:25 +01001787
Minfei Huangaf096e22015-12-08 13:47:34 -07001788 for (i = 0; i < nr_devices; i++) {
Shaohua Li2984c862017-08-14 15:04:52 -07001789 dev = null_alloc_dev();
Wei Yongjun30c516d2017-10-17 12:11:46 +00001790 if (!dev) {
1791 ret = -ENOMEM;
Minfei Huangaf096e22015-12-08 13:47:34 -07001792 goto err_dev;
Wei Yongjun30c516d2017-10-17 12:11:46 +00001793 }
Shaohua Li2984c862017-08-14 15:04:52 -07001794 ret = null_add_dev(dev);
1795 if (ret) {
1796 null_free_dev(dev);
1797 goto err_dev;
1798 }
Minfei Huangaf096e22015-12-08 13:47:34 -07001799 }
1800
Jens Axboef2298c02013-10-25 11:52:25 +01001801	pr_info("null_blk: module loaded\n");
1802 return 0;
Minfei Huangaf096e22015-12-08 13:47:34 -07001803
1804err_dev:
1805 while (!list_empty(&nullb_list)) {
1806 nullb = list_entry(nullb_list.next, struct nullb, list);
Shaohua Li2984c862017-08-14 15:04:52 -07001807 dev = nullb->dev;
Minfei Huangaf096e22015-12-08 13:47:34 -07001808 null_del_dev(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001809 null_free_dev(dev);
Minfei Huangaf096e22015-12-08 13:47:34 -07001810 }
Minfei Huangaf096e22015-12-08 13:47:34 -07001811 unregister_blkdev(null_major, "nullb");
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001812err_conf:
1813 configfs_unregister_subsystem(&nullb_subsys);
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001814err_tagset:
Shaohua Li2984c862017-08-14 15:04:52 -07001815 if (g_queue_mode == NULL_Q_MQ && shared_tags)
Max Gurtovoydb2d1532017-07-06 18:00:07 +03001816 blk_mq_free_tag_set(&tag_set);
Minfei Huangaf096e22015-12-08 13:47:34 -07001817 return ret;
Jens Axboef2298c02013-10-25 11:52:25 +01001818}
1819
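/*
 * Tear down in reverse order of null_init(): configfs first so no new
 * devices can appear, then the block major, then every remaining device
 * under the lock.
 */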
1820static void __exit null_exit(void)
1821{
1822 struct nullb *nullb;
1823
Shaohua Li3bf2bd22017-08-14 15:04:53 -07001824 configfs_unregister_subsystem(&nullb_subsys);
1825
Jens Axboef2298c02013-10-25 11:52:25 +01001826 unregister_blkdev(null_major, "nullb");
1827
1828 mutex_lock(&lock);
1829 while (!list_empty(&nullb_list)) {
Shaohua Li2984c862017-08-14 15:04:52 -07001830 struct nullb_device *dev;
1831
Jens Axboef2298c02013-10-25 11:52:25 +01001832 nullb = list_entry(nullb_list.next, struct nullb, list);
Shaohua Li2984c862017-08-14 15:04:52 -07001833 dev = nullb->dev;
Jens Axboef2298c02013-10-25 11:52:25 +01001834 null_del_dev(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001835 null_free_dev(dev);
Jens Axboef2298c02013-10-25 11:52:25 +01001836 }
1837 mutex_unlock(&lock);
Matias Bjørling6bb95352015-11-19 12:50:08 +01001838
Shaohua Li2984c862017-08-14 15:04:52 -07001839 if (g_queue_mode == NULL_Q_MQ && shared_tags)
Jens Axboe82f402f2017-06-20 14:22:01 -06001840 blk_mq_free_tag_set(&tag_set);
Jens Axboef2298c02013-10-25 11:52:25 +01001841}
1842
1843module_init(null_init);
1844module_exit(null_exit);
1845
Jens Axboe231b3db2017-08-25 12:53:15 -06001846MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
Jens Axboef2298c02013-10-25 11:52:25 +01001847MODULE_LICENSE("GPL");