blob: 3d25c9ad23831bc126f13d9263586fa833c0d1fd [file] [log] [blame]
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02001// SPDX-License-Identifier: GPL-2.0
2#include <linux/vmalloc.h>
3#include "null_blk.h"
4
Chaitanya Kulkarni766c3292020-03-25 10:49:56 -07005#define CREATE_TRACE_POINTS
6#include "null_blk_trace.h"
7
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02008/* zone_size in MBs to sectors. */
9#define ZONE_SIZE_SHIFT 11
10
11static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
12{
13 return sect >> ilog2(dev->zone_size_sects);
14}
15
Damien Le Moald205bde2020-04-23 12:02:38 +090016int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020017{
18 sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
19 sector_t sector = 0;
20 unsigned int i;
21
22 if (!is_power_of_2(dev->zone_size)) {
André Almeida9c7eddf2019-09-16 11:07:59 -030023 pr_err("zone_size must be power-of-two\n");
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020024 return -EINVAL;
25 }
Chaitanya Kulkarnie2748322020-05-20 16:01:51 -070026 if (dev->zone_size > dev->size) {
27 pr_err("Zone size larger than device capacity\n");
28 return -EINVAL;
29 }
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020030
Aravind Ramesh089565f2020-06-29 12:06:38 -070031 if (!dev->zone_capacity)
32 dev->zone_capacity = dev->zone_size;
33
34 if (dev->zone_capacity > dev->zone_size) {
35 pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
36 dev->zone_capacity, dev->zone_size);
37 return -EINVAL;
38 }
39
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020040 dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
41 dev->nr_zones = dev_size >>
42 (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
43 dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
44 GFP_KERNEL | __GFP_ZERO);
45 if (!dev->zones)
46 return -ENOMEM;
47
Masato Suzukiea2c18e2018-10-30 16:14:05 +090048 if (dev->zone_nr_conv >= dev->nr_zones) {
49 dev->zone_nr_conv = dev->nr_zones - 1;
André Almeida9c7eddf2019-09-16 11:07:59 -030050 pr_info("changed the number of conventional zones to %u",
Masato Suzukiea2c18e2018-10-30 16:14:05 +090051 dev->zone_nr_conv);
52 }
53
54 for (i = 0; i < dev->zone_nr_conv; i++) {
55 struct blk_zone *zone = &dev->zones[i];
56
57 zone->start = sector;
58 zone->len = dev->zone_size_sects;
Matias Bjørling82394db2020-06-29 12:06:37 -070059 zone->capacity = zone->len;
Masato Suzukiea2c18e2018-10-30 16:14:05 +090060 zone->wp = zone->start + zone->len;
61 zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
62 zone->cond = BLK_ZONE_COND_NOT_WP;
63
64 sector += dev->zone_size_sects;
65 }
66
67 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020068 struct blk_zone *zone = &dev->zones[i];
69
70 zone->start = zone->wp = sector;
71 zone->len = dev->zone_size_sects;
Aravind Ramesh089565f2020-06-29 12:06:38 -070072 zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020073 zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
74 zone->cond = BLK_ZONE_COND_EMPTY;
75
76 sector += dev->zone_size_sects;
77 }
78
Damien Le Moald205bde2020-04-23 12:02:38 +090079 q->limits.zoned = BLK_ZONED_HM;
80 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
81 blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
82
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020083 return 0;
84}
85
Damien Le Moald205bde2020-04-23 12:02:38 +090086int null_register_zoned_dev(struct nullb *nullb)
87{
Damien Le Moale0489ed2020-05-12 17:55:52 +090088 struct nullb_device *dev = nullb->dev;
Damien Le Moald205bde2020-04-23 12:02:38 +090089 struct request_queue *q = nullb->q;
90
Damien Le Moale0489ed2020-05-12 17:55:52 +090091 if (queue_is_mq(q)) {
92 int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
Damien Le Moald205bde2020-04-23 12:02:38 +090093
Damien Le Moale0489ed2020-05-12 17:55:52 +090094 if (ret)
95 return ret;
96 } else {
97 blk_queue_chunk_sectors(q, dev->zone_size_sects);
98 q->nr_zones = blkdev_nr_zones(nullb->disk);
99 }
100
101 blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
Damien Le Moald205bde2020-04-23 12:02:38 +0900102
103 return 0;
104}
105
106void null_free_zoned_dev(struct nullb_device *dev)
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200107{
108 kvfree(dev->zones);
109}
110
Christoph Hellwig7fc8fb52019-11-11 11:39:27 +0900111int null_report_zones(struct gendisk *disk, sector_t sector,
Christoph Hellwigd4100352019-11-11 11:39:30 +0900112 unsigned int nr_zones, report_zones_cb cb, void *data)
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200113{
Christoph Hellwige76239a2018-10-12 19:08:49 +0900114 struct nullb *nullb = disk->private_data;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200115 struct nullb_device *dev = nullb->dev;
Christoph Hellwigd4100352019-11-11 11:39:30 +0900116 unsigned int first_zone, i;
117 struct blk_zone zone;
118 int error;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200119
Christoph Hellwigd4100352019-11-11 11:39:30 +0900120 first_zone = null_zone_no(dev, sector);
121 if (first_zone >= dev->nr_zones)
122 return 0;
123
124 nr_zones = min(nr_zones, dev->nr_zones - first_zone);
Chaitanya Kulkarni766c3292020-03-25 10:49:56 -0700125 trace_nullb_report_zones(nullb, nr_zones);
126
Christoph Hellwigd4100352019-11-11 11:39:30 +0900127 for (i = 0; i < nr_zones; i++) {
128 /*
129 * Stacked DM target drivers will remap the zone information by
130 * modifying the zone information passed to the report callback.
131 * So use a local copy to avoid corruption of the device zone
132 * array.
133 */
134 memcpy(&zone, &dev->zones[first_zone + i],
135 sizeof(struct blk_zone));
136 error = cb(&zone, i, data);
137 if (error)
138 return error;
Christoph Hellwige76239a2018-10-12 19:08:49 +0900139 }
140
Christoph Hellwigd4100352019-11-11 11:39:30 +0900141 return nr_zones;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200142}
143
Ajay Joshidd85b492019-10-17 14:19:43 -0700144size_t null_zone_valid_read_len(struct nullb *nullb,
145 sector_t sector, unsigned int len)
146{
147 struct nullb_device *dev = nullb->dev;
148 struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
149 unsigned int nr_sectors = len >> SECTOR_SHIFT;
150
151 /* Read must be below the write pointer position */
152 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
153 sector + nr_sectors <= zone->wp)
154 return len;
155
156 if (sector > zone->wp)
157 return 0;
158
159 return (zone->wp - sector) << SECTOR_SHIFT;
160}
161
/*
 * Handle a regular write (@append == false) or zone append (@append == true)
 * of @nr_sectors sectors at @sector, enforcing the zone state machine:
 * regular writes must land exactly on the write pointer, appends are issued
 * at the write pointer and the chosen position is written back into the
 * BIO/request so the caller can see where the data went.
 */
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	/* Conventional zones accept writes anywhere: no state tracking. */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		return BLK_STS_IOERR;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		/*
		 * Regular writes must be at the write pointer position.
		 * Zone append writes are automatically issued at the write
		 * pointer and the position returned using the request or BIO
		 * sector.
		 */
		if (append) {
			sector = zone->wp;
			if (cmd->bio)
				cmd->bio->bi_iter.bi_sector = sector;
			else
				cmd->rq->__sector = sector;
		} else if (sector != zone->wp) {
			return BLK_STS_IOERR;
		}

		/* The write must not run past the zone capacity. */
		if (zone->wp + nr_sectors > zone->start + zone->capacity)
			return BLK_STS_IOERR;

		/* Writing to a non-explicitly-open zone implicitly opens it. */
		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
		if (ret != BLK_STS_OK)
			return ret;

		/* Advance the write pointer only after the data is accepted. */
		zone->wp += nr_sectors;
		if (zone->wp == zone->start + zone->capacity)
			zone->cond = BLK_ZONE_COND_FULL;
		return BLK_STS_OK;
	default:
		/* Invalid zone condition */
		return BLK_STS_IOERR;
	}
}
218
Ajay Joshida644b22019-10-27 23:05:49 +0900219static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
220 sector_t sector)
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200221{
222 struct nullb_device *dev = cmd->nq->dev;
Chaitanya Kulkarni766c3292020-03-25 10:49:56 -0700223 unsigned int zone_no = null_zone_no(dev, sector);
224 struct blk_zone *zone = &dev->zones[zone_no];
Chaitanya Kulkarnia61dbfb2019-08-01 10:26:38 -0700225 size_t i;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200226
Ajay Joshida644b22019-10-27 23:05:49 +0900227 switch (op) {
Chaitanya Kulkarnia61dbfb2019-08-01 10:26:38 -0700228 case REQ_OP_ZONE_RESET_ALL:
229 for (i = 0; i < dev->nr_zones; i++) {
230 if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
231 continue;
232 zone[i].cond = BLK_ZONE_COND_EMPTY;
233 zone[i].wp = zone[i].start;
234 }
235 break;
236 case REQ_OP_ZONE_RESET:
Chaitanya Kulkarnifceb5d12019-08-22 21:45:18 -0700237 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
238 return BLK_STS_IOERR;
Chaitanya Kulkarnia61dbfb2019-08-01 10:26:38 -0700239
240 zone->cond = BLK_ZONE_COND_EMPTY;
241 zone->wp = zone->start;
242 break;
Ajay Joshida644b22019-10-27 23:05:49 +0900243 case REQ_OP_ZONE_OPEN:
244 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
245 return BLK_STS_IOERR;
246 if (zone->cond == BLK_ZONE_COND_FULL)
247 return BLK_STS_IOERR;
248
249 zone->cond = BLK_ZONE_COND_EXP_OPEN;
250 break;
251 case REQ_OP_ZONE_CLOSE:
252 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
253 return BLK_STS_IOERR;
254 if (zone->cond == BLK_ZONE_COND_FULL)
255 return BLK_STS_IOERR;
256
Damien Le Moalc7d776f2019-12-26 15:54:25 +0900257 if (zone->wp == zone->start)
258 zone->cond = BLK_ZONE_COND_EMPTY;
259 else
260 zone->cond = BLK_ZONE_COND_CLOSED;
Ajay Joshida644b22019-10-27 23:05:49 +0900261 break;
262 case REQ_OP_ZONE_FINISH:
263 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
264 return BLK_STS_IOERR;
265
266 zone->cond = BLK_ZONE_COND_FULL;
267 zone->wp = zone->start + zone->len;
268 break;
Chaitanya Kulkarnia61dbfb2019-08-01 10:26:38 -0700269 default:
Keith Busch79a85e22019-10-10 00:38:13 +0900270 return BLK_STS_NOTSUPP;
Masato Suzukiea2c18e2018-10-30 16:14:05 +0900271 }
Chaitanya Kulkarni766c3292020-03-25 10:49:56 -0700272
273 trace_nullb_zone_op(cmd, zone_no, zone->cond);
Chaitanya Kulkarnifceb5d12019-08-22 21:45:18 -0700274 return BLK_STS_OK;
275}
276
Damien Le Moal9dd44c72020-04-23 12:02:37 +0900277blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
278 sector_t sector, sector_t nr_sectors)
Chaitanya Kulkarnifceb5d12019-08-22 21:45:18 -0700279{
280 switch (op) {
281 case REQ_OP_WRITE:
Damien Le Moale0489ed2020-05-12 17:55:52 +0900282 return null_zone_write(cmd, sector, nr_sectors, false);
283 case REQ_OP_ZONE_APPEND:
284 return null_zone_write(cmd, sector, nr_sectors, true);
Chaitanya Kulkarnifceb5d12019-08-22 21:45:18 -0700285 case REQ_OP_ZONE_RESET:
286 case REQ_OP_ZONE_RESET_ALL:
Ajay Joshida644b22019-10-27 23:05:49 +0900287 case REQ_OP_ZONE_OPEN:
288 case REQ_OP_ZONE_CLOSE:
289 case REQ_OP_ZONE_FINISH:
290 return null_zone_mgmt(cmd, op, sector);
Chaitanya Kulkarnifceb5d12019-08-22 21:45:18 -0700291 default:
Damien Le Moal9dd44c72020-04-23 12:02:37 +0900292 return null_process_cmd(cmd, op, sector, nr_sectors);
Chaitanya Kulkarnifceb5d12019-08-22 21:45:18 -0700293 }
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200294}