/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

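/*
 * With TICKS_PER_SEC = 50 the bandwidth timer fires every 20ms;
 * mb_per_tick() converts the configured MB/s limit into the number of
 * bytes the device may transfer during one such tick.
 */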
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

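/*
 * One bit per sector in a page, plus two extra bits backing the
 * NULLB_PAGE_LOCK and NULLB_PAGE_FREE flags defined below.
 */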
#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap records which sectors in the page hold data.
 *	Each bit covers one block; with a 512-byte block size, sectors
 *	0-7 of a 4KB page map to bits 0-7.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page has been
 * freed and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

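/*
 * The timeout/requeue strings are standard fault-injection descriptors,
 * believed to use the usual "<interval>,<probability>,<space>,<times>"
 * format parsed by setup_fault_attr() at module init, e.g.
 * timeout=1,100,0,-1 to time out every request.
 */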
static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

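/*
 * Illustrative module load combining the parameters above and below:
 * "modprobe null_blk queue_mode=2 gb=4 bs=4096 nr_devices=2" creates two
 * 4GB blk-mq devices with a 4KB block size.
 */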
static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Expose the device as a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when the block device is zoned. Must be a power of two. Default: 256");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);

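/*
 * Each invocation below generates a configfs attribute file of the same
 * name under /sys/kernel/config/nullb/<device>/; stores are rejected
 * with -EBUSY once the device has been configured.
 */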
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);

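/*
 * "power" is the configfs trigger for device creation: writing 1 calls
 * null_add_dev() and brings the disk up, writing 0 tears it down.
 */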
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

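/*
 * badblocks takes ranges of the form "+<start>-<end>" to mark sectors
 * bad and "-<start>-<end>" to clear them, e.g. "echo +0-7 > badblocks".
 */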
static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};
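/*
 * Illustrative configfs flow, assuming configfs is mounted at
 * /sys/kernel/config:
 *	mkdir /sys/kernel/config/nullb/nullb1
 *	echo 4096 > /sys/kernel/config/nullb/nullb1/blocksize
 *	echo 1 > /sys/kernel/config/nullb/nullb1/memory_backed
 *	echo 1 > /sys/kernel/config/nullb/nullb1/power
 * The disk then shows up as a /dev/nullb<index> block device.
 */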

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, cmd->error);
		break;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

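/*
 * null_insert_page() may drop nullb->lock to allocate a page and preload
 * the radix tree (hence the __releases/__acquires annotations below);
 * after relocking, the page is looked up again in case another thread
 * inserted it meanwhile.
 */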
static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

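/*
 * Write one cache page back to the data store: every block whose dirty
 * bit is set is copied over, then the cache page is dropped. A page that
 * was marked NULLB_PAGE_FREE while its LOCK bit was held is discarded
 * instead of being written back.
 */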
static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

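/*
 * Evict cache pages until @n more bytes fit under the configured
 * cache_size budget: gather up to FREE_BATCH pages per round, lock those
 * no other thread is flushing, and write them back through
 * null_flush_cache_page().
 */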
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow page free.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * Skip a page that another thread is already flushing to
		 * disk.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

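/*
 * Copy @n bytes from @source into the backing store one block at a time.
 * With the write-back cache active, non-FUA writes land in cache pages
 * (after making room via null_make_cache_space()); FUA writes go straight
 * to the data tree and invalidate any cached copy of the sector.
 */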
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		/* FUA is a flag in cmd_flags, not part of req_op() */
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     rq->cmd_flags & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		/* FUA is a flag in bi_opf, not part of bio_op() */
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
	else {
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	unsigned long flags;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
	else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

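/*
 * Central command handler. In order it applies zone-report handling,
 * bandwidth throttling (stopping the queue when the per-tick byte budget
 * is exhausted), badblocks checking, the memory-backed data path and
 * zoned write/reset bookkeeping, then completes the command according to
 * the configured irqmode.
 */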
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
		cmd->error = null_zone_report(nullb, cmd);
		goto out;
	}

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			if (dev->queue_mode == NULL_Q_RQ) {
				struct request_queue *q = nullb->q;

				spin_lock_irq(q->queue_lock);
				rq->rq_flags |= RQF_DONTPREP;
				blk_requeue_request(q, rq);
				spin_unlock_irq(q->queue_lock);
				return BLK_STS_OK;
			} else
				/* requeue request */
				return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);

	if (!cmd->error && dev->zoned) {
		if (req_op(cmd->rq) == REQ_OP_WRITE)
			null_zone_write(cmd);
		else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

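/*
 * Bandwidth-limit timer: every TIMER_INTERVAL the per-tick byte budget
 * is refilled to mb_per_tick(mbps) and any queue stopped by the throttle
 * is restarted. The timer lets itself stop once a full budget goes
 * unused.
 */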
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
{
	pr_info("null: rq %p timed out\n", rq);
	__blk_complete_request(rq);
	return BLK_EH_DONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

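/*
 * Fault-injection hooks: a request is timed out or requeued only when
 * the corresponding module parameter string was supplied and the shared
 * fault_attr decides that this request should fail.
 */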
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		/* just ignore the request */
		if (should_timeout_request(rq))
			continue;
		if (should_requeue_request(rq)) {
			blk_requeue_request(q, rq);
			continue;
		}

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

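/*
 * blk-mq fast path. Injected requeues alternate between returning
 * BLK_STS_RESOURCE (core-driven requeue) and calling
 * blk_mq_requeue_request() (driver-driven requeue); injected timeouts
 * leave the started request unfinished so the block layer's timeout
 * handler fires.
 */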
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001391static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
Jens Axboe74c45052014-10-29 11:14:52 -06001392 const struct blk_mq_queue_data *bd)
Jens Axboef2298c02013-10-25 11:52:25 +01001393{
Jens Axboe74c45052014-10-29 11:14:52 -06001394 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
Shaohua Li2984c862017-08-14 15:04:52 -07001395 struct nullb_queue *nq = hctx->driver_data;
Jens Axboef2298c02013-10-25 11:52:25 +01001396
Jens Axboedb5bcf82017-03-30 13:44:26 -06001397 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1398
Shaohua Li2984c862017-08-14 15:04:52 -07001399 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
Paolo Valente3c395a92015-12-01 11:48:17 +01001400 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1401 cmd->timer.function = null_cmd_timer_expired;
1402 }
Jens Axboe74c45052014-10-29 11:14:52 -06001403 cmd->rq = bd->rq;
Shaohua Li2984c862017-08-14 15:04:52 -07001404 cmd->nq = nq;
Jens Axboef2298c02013-10-25 11:52:25 +01001405
Jens Axboe74c45052014-10-29 11:14:52 -06001406 blk_mq_start_request(bd->rq);
Christoph Hellwige2490072014-09-13 16:40:09 -07001407
Jens Axboe24941b92018-02-28 09:18:57 -07001408 if (should_requeue_request(bd->rq)) {
1409 /*
1410 * Alternate between hitting the core BUSY path, and the
1411 * driver driven requeue path
1412 */
1413 nq->requeue_selection++;
1414 if (nq->requeue_selection & 1)
1415 return BLK_STS_RESOURCE;
1416 else {
1417 blk_mq_requeue_request(bd->rq, true);
1418 return BLK_STS_OK;
1419 }
1420 }
1421 if (should_timeout_request(bd->rq))
1422 return BLK_STS_OK;
Jens Axboe93b57042018-01-10 09:06:23 -07001423
Jens Axboe24941b92018-02-28 09:18:57 -07001424 return null_handle_cmd(cmd);
Jens Axboef2298c02013-10-25 11:52:25 +01001425}
1426
Eric Biggersf363b082017-03-30 13:39:16 -07001427static const struct blk_mq_ops null_mq_ops = {
Jens Axboef2298c02013-10-25 11:52:25 +01001428 .queue_rq = null_queue_rq,
Christoph Hellwigce2c3502014-02-10 03:24:40 -08001429 .complete = null_softirq_done_fn,
Jens Axboe5448aca2018-01-09 12:47:24 -07001430 .timeout = null_timeout_rq,
Jens Axboef2298c02013-10-25 11:52:25 +01001431};
1432
Matias Bjørlingde65d2d2015-08-31 14:17:18 +02001433static void cleanup_queue(struct nullb_queue *nq)
1434{
1435 kfree(nq->tag_map);
1436 kfree(nq->cmds);
1437}
1438
1439static void cleanup_queues(struct nullb *nullb)
1440{
1441 int i;
1442
1443 for (i = 0; i < nullb->nr_queues; i++)
1444 cleanup_queue(&nullb->queues[i]);
1445
1446 kfree(nullb->queues);
1447}
1448
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001449static void null_del_dev(struct nullb *nullb)
1450{
Shaohua Li2984c862017-08-14 15:04:52 -07001451 struct nullb_device *dev = nullb->dev;
1452
Shaohua Li94bc02e2017-08-14 15:04:55 -07001453 ida_simple_remove(&nullb_indexes, nullb->index);
1454
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001455 list_del_init(&nullb->list);
1456
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001457 del_gendisk(nullb->disk);
Shaohua Lieff2c4f2017-08-14 15:04:58 -07001458
1459 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1460 hrtimer_cancel(&nullb->bw_timer);
1461 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1462 null_restart_queue_async(nullb);
1463 }
1464
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001465 blk_cleanup_queue(nullb->q);
Shaohua Li2984c862017-08-14 15:04:52 -07001466 if (dev->queue_mode == NULL_Q_MQ &&
1467 nullb->tag_set == &nullb->__tag_set)
Jens Axboe82f402f2017-06-20 14:22:01 -06001468 blk_mq_free_tag_set(nullb->tag_set);
Matias Bjørling74ede5a2018-01-05 14:15:57 +01001469 put_disk(nullb->disk);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001470 cleanup_queues(nullb);
Shaohua Lideb78b42017-08-14 15:04:59 -07001471 if (null_cache_active(nullb))
1472 null_free_device_storage(nullb->dev, true);
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001473 kfree(nullb);
Shaohua Li2984c862017-08-14 15:04:52 -07001474 dev->nullb = NULL;
Matias Bjørling9ae2d0a2016-09-16 14:25:05 +02001475}
1476
Shaohua Li306eb6b2017-08-14 15:04:57 -07001477static void null_config_discard(struct nullb *nullb)
1478{
1479 if (nullb->dev->discard == false)
1480 return;
1481 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1482 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1483 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
Bart Van Assche8b904b52018-03-07 17:10:10 -08001484 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
Matias Bjørlingb2b7e002015-11-12 20:25:10 +01001485}
1486
Jens Axboef2298c02013-10-25 11:52:25 +01001487static int null_open(struct block_device *bdev, fmode_t mode)
1488{
1489 return 0;
1490}
1491
1492static void null_release(struct gendisk *disk, fmode_t mode)
1493{
1494}
1495
1496static const struct block_device_operations null_fops = {
1497 .owner = THIS_MODULE,
1498 .open = null_open,
1499 .release = null_release,
1500};
1501
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

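/*
 * Bind each active hardware context to a nullb_queue. Contexts with no
 * mapped software queues or no tags are skipped, so nr_queues ends up
 * counting only the hctxs that can actually receive I/O.
 */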
static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

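/*
 * Preallocate the per-queue command array and the tag bitmap used by the
 * bio and legacy request paths; blk-mq carries its commands in the
 * request pdu (cmd_size in the tag set) and never comes through here.
 */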
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

Jens Axboef2298c02013-10-25 11:52:25 +01001555static int setup_queues(struct nullb *nullb)
1556{
Kees Cook6396bb22018-06-12 14:03:40 -07001557 nullb->queues = kcalloc(nullb->dev->submit_queues,
1558 sizeof(struct nullb_queue),
1559 GFP_KERNEL);
Jens Axboef2298c02013-10-25 11:52:25 +01001560 if (!nullb->queues)
Matias Bjorling2d263a782013-12-18 13:41:43 +01001561 return -ENOMEM;
Jens Axboef2298c02013-10-25 11:52:25 +01001562
1563 nullb->nr_queues = 0;
Shaohua Li2984c862017-08-14 15:04:52 -07001564 nullb->queue_depth = nullb->dev->hw_queue_depth;
Jens Axboef2298c02013-10-25 11:52:25 +01001565
Matias Bjorling2d263a782013-12-18 13:41:43 +01001566 return 0;
1567}
1568
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

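/*
 * dev->size is specified in megabytes; convert it to bytes and then to
 * the 512-byte sectors that set_capacity() expects.
 */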
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

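/*
 * When called with a NULL nullb (the shared tag_set case in null_init()),
 * the geometry comes from the g_* module parameters rather than a
 * per-device configuration.
 */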
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

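/*
 * Clamp a configuration to what the driver can service: block size is
 * rounded to a multiple of 512 and held to [512, 4096], submit_queues is
 * bounded by online nodes/CPUs, a write-back cache requires memory
 * backing, and bandwidth throttling is cleared in bio mode.
 */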
static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Memory backing allocates pages in the I/O path, so force blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* the cache is meaningless without memory backing */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
			dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* a bio-based queue cannot be stopped, so throttling is unsupported */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

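/*
 * Bring one device online: validate the configuration, build the queues
 * for the chosen queue_mode, layer on the optional throttling, cache and
 * zoned features, then register the gendisk. The out_* labels unwind in
 * reverse order on failure.
 */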
static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault()) {
			/* rv is still 0 here; don't report success on failure */
			rv = -EINVAL;
			goto out_cleanup_queues;
		}

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
						NULL);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001738 } else {
Shaohua Li2984c862017-08-14 15:04:52 -07001739 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
1740 dev->home_node);
Robert Elliottdc501dc2014-09-02 11:38:49 -05001741 if (!nullb->q) {
1742 rv = -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001743 goto out_cleanup_queues;
Robert Elliottdc501dc2014-09-02 11:38:49 -05001744 }
Jens Axboe93b57042018-01-10 09:06:23 -07001745
1746 if (!null_setup_fault())
1747 goto out_cleanup_blk_queue;
1748
Jens Axboef2298c02013-10-25 11:52:25 +01001749 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001750 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
Jens Axboe5448aca2018-01-09 12:47:24 -07001751 blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
1752 nullb->q->rq_timeout = 5 * HZ;
Jan Kara31f96902014-10-22 15:34:21 +02001753 rv = init_driver_queues(nullb);
1754 if (rv)
1755 goto out_cleanup_blk_queue;
Jens Axboef2298c02013-10-25 11:52:25 +01001756 }

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
		blk_queue_flush_queueable(nullb->q, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

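/*
 * Module init order matters: register configfs before creating the
 * initial nr_devices instances, and grab the block major before any
 * gendisk registration. Once loaded, further devices can be created at
 * runtime through configfs; a sketch (assuming the attribute names this
 * driver exposes under /sys/kernel/config/nullb/):
 *
 *	mkdir /sys/kernel/config/nullb/test
 *	echo 1024 > /sys/kernel/config/nullb/test/size		# in MB
 *	echo 1 > /sys/kernel/config/nullb/test/memory_backed
 *	echo 1 > /sys/kernel/config/nullb/test/power		# device goes live
 */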
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null_blk: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");