/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BLK_NULL_BLK_H
#define __BLK_NULL_BLK_H

#undef pr_fmt
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
#include <linux/fault-inject.h>

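/*
 * Per-I/O state: embedded in the blk-mq request pdu, or taken from a
 * nullb_queue's cmds[] array in bio-based mode. The hrtimer drives
 * deferred completions when timer-based IRQ emulation is configured
 * (irqmode/completion_nsec).
 */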
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct __call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	blk_status_t error;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

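/*
 * Per-queue state. In bio-based mode the driver manages its own tags via
 * tag_map/wait/queue_depth and serves commands from cmds[]; in blk-mq mode
 * tagging is handled by the block layer and those fields are unused.
 */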
struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;
	unsigned int requeue_selection;

	struct nullb_cmd *cmds;
};

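/*
 * Per-device configuration and backing state, populated from module
 * parameters or through the configfs interface. "data" holds the
 * memory-backed pages; "cache" models a write cache whose contents are
 * flushed into "data" once cache_size is reached.
 */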
struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;
	struct badblocks badblocks;

	unsigned int nr_zones;
	struct blk_zone *zones;
	sector_t zone_size_sects;

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if discard is supported */
	bool zoned; /* if device is zoned */
};

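/*
 * A live null_blk instance created from a nullb_device: the request queue,
 * gendisk and tag set, plus state for bandwidth throttling (cur_bytes,
 * bw_timer) and for write-back of the emulated cache (cache_flush_pos).
 */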
struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	unsigned int queue_depth;
	atomic_long_t cur_bytes;
	struct hrtimer bw_timer;
	unsigned long cache_flush_pos;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

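/*
 * Zoned block device emulation is only built with CONFIG_BLK_DEV_ZONED.
 * Without it, the stubs below reject a zoned configuration at init time
 * and report zone commands as unsupported.
 */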
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
			       enum req_opf op, sector_t sector,
			       sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
					     enum req_opf op, sector_t sector,
					     sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
#define null_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */