// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 */

#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"

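/**
 * sd_zbc_parse_report - Parse a REPORT ZONES zone descriptor
 * @sdkp: The disk the report originated from
 * @buf: Address of the 64B zone descriptor to parse
 * @idx: Index of the zone in the report
 * @cb: Callback function pointer
 * @data: Second argument passed to @cb
 *
 * Convert a zone descriptor (type and condition from bytes 0 and 1, zone
 * length, start LBA and write pointer from bytes 8, 16 and 24) into a
 * struct blk_zone with all LBA values converted to 512B sectors, and pass
 * the result to @cb.
 */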
static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
			       unsigned int idx, report_zones_cb cb, void *data)
{
	struct scsi_device *sdp = sdkp->device;
	struct blk_zone zone = { 0 };

	zone.type = buf[0] & 0x0f;
	zone.cond = (buf[1] >> 4) & 0xf;
	if (buf[1] & 0x01)
		zone.reset = 1;
	if (buf[1] & 0x02)
		zone.non_seq = 1;

	zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
	zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
	zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
	if (zone.type != ZBC_ZONE_TYPE_CONV &&
	    zone.cond == ZBC_ZONE_COND_FULL)
		zone.wp = zone.start + zone.len;

	return cb(&zone, idx, data);
}

/**
 * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: vmalloc-ed buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 * @partial: Do partial report
 *
 * For internal use during device validation.
 * Using partial=true can significantly speed up execution of a report zones
 * command because the disk does not have to count all possible report matching
 * zones and will only report the count of zones fitting in the command reply
 * buffer.
 */
static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
				  unsigned int buflen, sector_t lba,
				  bool partial)
{
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16];
	unsigned int rep_len;
	int result;

	memset(cmd, 0, 16);
	cmd[0] = ZBC_IN;
	cmd[1] = ZI_REPORT_ZONES;
	put_unaligned_be64(lba, &cmd[2]);
	put_unaligned_be32(buflen, &cmd[10]);
	if (partial)
		cmd[14] = ZBC_REPORT_ZONE_PARTIAL;

	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
				  buf, buflen, &sshdr,
				  timeout, SD_MAX_RETRIES, NULL);
	if (result) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES start lba %llu failed\n", lba);
		sd_print_result(sdkp, "REPORT ZONES", result);
		if (driver_byte(result) == DRIVER_SENSE &&
		    scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EIO;
	}

	rep_len = get_unaligned_be32(&buf[0]);
	if (rep_len < 64) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES report invalid length %u\n",
			  rep_len);
		return -EIO;
	}

	return 0;
}

/**
 * sd_zbc_alloc_report_buffer - Allocate a buffer for report zones reply.
 * @sdkp: The target disk
 * @nr_zones: Maximum number of zones to report
 * @buflen: Size of the buffer allocated
 *
 * Try to allocate a reply buffer for the number of requested zones.
 * The size of the buffer allocated may be smaller than requested to
 * satisfy the device constraints (max_hw_sectors, max_segments, etc).
 *
 * Return the address of the allocated buffer and update @buflen with
 * the size of the allocated buffer.
 */
static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
					unsigned int nr_zones, size_t *buflen)
{
	struct request_queue *q = sdkp->disk->queue;
	size_t bufsize;
	void *buf;

	/*
	 * Report zone buffer size should be at most 64B times the number of
	 * zones requested plus the 64B reply header, but should be at least
	 * SECTOR_SIZE for ATA devices.
	 * Make sure that this size does not exceed the hardware capabilities.
	 * Furthermore, since the report zone command cannot be split, make
	 * sure that the allocated buffer can always be mapped by limiting the
	 * number of pages allocated to the HBA max segments limit.
	 */
	nr_zones = min(nr_zones, sdkp->nr_zones);
	bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

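	/*
	 * If the allocation fails, retry with a smaller buffer, halving the
	 * size each time, down to a minimum of SECTOR_SIZE.
	 */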
	while (bufsize >= SECTOR_SIZE) {
		buf = __vmalloc(bufsize,
				GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
				PAGE_KERNEL);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize >>= 1;
	}

	return NULL;
}

/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}

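/**
 * sd_zbc_report_zones - Disk report_zones operation.
 * @disk: Disk to report zones for
 * @sector: Start 512B sector of the report
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback invoked for each reported zone
 * @data: Second argument passed to @cb
 *
 * Issue REPORT ZONES commands and pass each parsed zone descriptor to @cb.
 * Returns the number of zones reported or a negative error code.
 */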
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	unsigned int nr, i;
	unsigned char *buf;
	size_t offset, buflen = 0;
	int zone_idx = 0;
	int ret;

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return -EOPNOTSUPP;

	buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
	if (!buf)
		return -ENOMEM;

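	/*
	 * Issue partial REPORT ZONES commands until @nr_zones zones have been
	 * reported or the end of the disk is reached, advancing the start
	 * sector by the number of zones parsed from each reply.
	 */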
	while (zone_idx < nr_zones && sector < get_capacity(disk)) {
		ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
				sectors_to_logical(sdkp->device, sector), true);
		if (ret)
			goto out;

		offset = 0;
		nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
		if (!nr)
			break;

		for (i = 0; i < nr && zone_idx < nr_zones; i++) {
			offset += 64;
			ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
						  cb, data);
			if (ret)
				goto out;
			zone_idx++;
		}

		sector += sd_zbc_zone_sectors(sdkp) * i;
	}

	ret = zone_idx;
out:
	kvfree(buf);
	return ret;
}

/**
 * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
 *			can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
 * @cmd: the command to setup
 * @op: Operation to be performed
 * @all: All zones control
 *
 * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
 */
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
					 unsigned char op, bool all)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t sector = blk_rq_pos(rq);
	sector_t block = sectors_to_logical(sdkp->device, sector);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLK_STS_IOERR;

	if (sdkp->device->changed)
		return BLK_STS_IOERR;

	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
		/* Unaligned request */
		return BLK_STS_IOERR;

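	/*
	 * Build the 16 byte ZBC OUT CDB: the service action goes in byte 1,
	 * the zone start LBA in bytes 2 to 9 and the ALL bit in byte 14.
	 */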
	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_OUT;
	cmd->cmnd[1] = op;
	if (all)
		cmd->cmnd[14] = 0x1;
	else
		put_unaligned_be64(block, &cmd->cmnd[2]);

	rq->timeout = SD_TIMEOUT;
	cmd->sc_data_direction = DMA_NONE;
	cmd->transfersize = 0;
	cmd->allowed = 0;

	return BLK_STS_OK;
}

/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done(). Quiet harmless errors reported for zone management
 * commands issued against conventional zones.
 */
void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
		     struct scsi_sense_hdr *sshdr)
{
	int result = cmd->result;
	struct request *rq = cmd->request;

	if (op_is_zone_mgmt(req_op(rq)) &&
	    result &&
	    sshdr->sense_key == ILLEGAL_REQUEST &&
	    sshdr->asc == 0x24) {
		/*
		 * INVALID FIELD IN CDB error: a zone management command was
		 * attempted on a conventional zone. Nothing to worry about,
		 * so be quiet about the error.
		 */
		rq->rq_flags |= RQF_QUIET;
	}
}

/**
 * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 *
 * Read VPD page B6, get information and check that reads are unconstrained.
 */
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
					      unsigned char *buf)
{
	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Read zoned characteristics VPD page failed\n");
		return -ENODEV;
	}

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
	} else {
		/* Host-managed */
		sdkp->urswrz = buf[4] & 1;
		sdkp->zones_optimal_open = 0;
		sdkp->zones_optimal_nonseq = 0;
		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	}

	/*
	 * Check for unconstrained reads: host-managed devices with
	 * constrained reads (drives failing read after write pointer)
	 * are not supported.
	 */
	if (!sdkp->urswrz) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "constrained reads devices are not supported\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * sd_zbc_check_zones - Check the device capacity and zone sizes
 * @sdkp: Target disk
 * @buf: Buffer used for the REPORT ZONES command reply
 * @zblocks: Zone size in number of logical blocks
 *
 * Check that the device capacity as reported by READ CAPACITY matches the
 * max_lba value (plus one) of the report zones command reply and get the
 * device zone size from the first reported zone. The zone size must be a
 * power of two number of logical blocks.
 *
 * Returns 0 upon success, with *zblocks set to the zone size in number of
 * blocks, or an error code upon failure.
 */
static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
			      u32 *zblocks)
{
	u64 zone_blocks = 0;
	sector_t max_lba;
	unsigned char *rec;
	int ret;

	/* Do a report zone to get max_lba and the size of the first zone */
	ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
	if (ret)
		return ret;

	if (sdkp->rc_basis == 0) {
		/* The max_lba field is the capacity of this device */
		max_lba = get_unaligned_be64(&buf[8]);
		if (sdkp->capacity != max_lba + 1) {
			if (sdkp->first_scan)
				sd_printk(KERN_WARNING, sdkp,
					"Changing capacity from %llu to max LBA+1 %llu\n",
					(unsigned long long)sdkp->capacity,
					(unsigned long long)max_lba + 1);
			sdkp->capacity = max_lba + 1;
		}
	}

	/* Get the size of the first reported zone */
	rec = buf + 64;
	zone_blocks = get_unaligned_be64(&rec[8]);
	if (!zone_blocks || !is_power_of_2(zone_blocks)) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Devices with non power of 2 zone "
				  "size are not supported\n");
		return -ENODEV;
	}

	if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Zone size too large\n");
		return -EFBIG;
	}

	*zblocks = zone_blocks;

	return 0;
}

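/**
 * sd_zbc_read_zones - Read zone information and set up the disk zone model
 * @sdkp: Target disk
 * @buf: Buffer of at least SD_BUF_SIZE bytes used for command replies
 *
 * Check the zoned block device characteristics and zone size, switch the
 * device to READ16/WRITE16, and revalidate the block layer zone information
 * when the zone configuration changes. Called during disk scan and
 * revalidation.
 */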
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
	struct gendisk *disk = sdkp->disk;
	unsigned int nr_zones;
	u32 zone_blocks = 0;
	int ret;

	if (!sd_is_zoned(sdkp))
		/*
		 * Device managed or normal SCSI disk,
		 * no special handling required
		 */
		return 0;

	/* Check zoned block device characteristics (unconstrained reads) */
	ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
	if (ret)
		goto err;

	/*
	 * Check zone size: only devices with a constant zone size (except
	 * an eventual last runt zone) that is a power of 2 are supported.
	 */
	ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
	if (ret != 0)
		goto err;

	/* The drive satisfies the kernel restrictions: set it up */
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
	blk_queue_required_elevator_features(sdkp->disk->queue,
					     ELEVATOR_F_ZBD_SEQ_WRITE);
	nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);

	/* READ16/WRITE16 is mandatory for ZBC disks */
	sdkp->device->use_16_for_rw = 1;
	sdkp->device->use_10_for_rw = 0;

	/*
	 * Revalidate the disk zone bitmaps once the block device capacity is
	 * set on the second revalidate execution during disk scan and if
	 * something changed when executing a normal revalidate.
	 */
	if (sdkp->first_scan) {
		sdkp->zone_blocks = zone_blocks;
		sdkp->nr_zones = nr_zones;
		return 0;
	}

	if (sdkp->zone_blocks != zone_blocks ||
	    sdkp->nr_zones != nr_zones ||
	    disk->queue->nr_zones != nr_zones) {
		ret = blk_revalidate_disk_zones(disk);
		if (ret != 0)
			goto err;
		sdkp->zone_blocks = zone_blocks;
		sdkp->nr_zones = nr_zones;
	}

	return 0;

err:
	sdkp->capacity = 0;

	return ret;
}

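/**
 * sd_zbc_print_zones - Print the disk zone configuration
 * @sdkp: Target disk
 *
 * Log the number of zones and the zone size, noting a smaller last (runt)
 * zone when the capacity is not a multiple of the zone size.
 */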
void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
		return;

	if (sdkp->capacity & (sdkp->zone_blocks - 1))
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks + 1 runt zone\n",
			  sdkp->nr_zones - 1,
			  sdkp->zone_blocks);
	else
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks\n",
			  sdkp->nr_zones,
			  sdkp->zone_blocks);
}