// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
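
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally go through the blk_req_zone_write_lock()/unlock() inline
 * wrappers from <linux/blkdev.h>, which gate on the helpers above:
 *
 *	if (blk_req_needs_zone_write_lock(rq))
 *		__blk_req_zone_write_lock(rq);
 *	...dispatch rq to the device...
 *	__blk_req_zone_write_unlock(rq);	// on completion or requeue
 *
 * This serializes writes per sequential zone so that the device never
 * sees two in-flight writes targeting the same zone.
 */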

static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
					     sector_t nr_sectors)
{
	sector_t zone_sectors = blk_queue_zone_sectors(q);

	return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}
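
/*
 * Example (illustrative numbers): with 256 MiB zones, zone_sectors is
 * 524288 (2^19) 512-byte sectors. For a 1 TiB device, nr_sectors is
 * 2147483648 (2^31), so this computes (2^31 + 2^19 - 1) >> 19 = 4096
 * zones. Zone size is always a power of two, which is why a shift by
 * ilog2(zone_sectors) can replace a 64-bit division, and the rounding
 * up accounts for an eventual smaller last zone.
 */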

/**
 * blkdev_nr_zones - Get number of zones
 * @bdev: Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
99
Christoph Hellwig6cc77e92017-12-21 15:43:38 +0900100/*
Christoph Hellwige76239a2018-10-12 19:08:49 +0900101 * Check that a zone report belongs to this partition, and if yes, fix its start
102 * sector and write pointer and return true. Return false otherwise.
Hannes Reinecke6a0cb1b2016-10-18 15:40:33 +0900103 */
Christoph Hellwige76239a2018-10-12 19:08:49 +0900104static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
Hannes Reinecke6a0cb1b2016-10-18 15:40:33 +0900105{
106 sector_t offset = get_start_sect(bdev);
107
108 if (rep->start < offset)
109 return false;
110
111 rep->start -= offset;
112 if (rep->start + rep->len > bdev->bd_part->nr_sects)
113 return false;
114
115 if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
116 rep->wp = rep->start + rep->len;
117 else
118 rep->wp -= offset;
Hannes Reinecke6a0cb1b2016-10-18 15:40:33 +0900119 return true;
120}
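
/*
 * Example (hypothetical numbers): for a partition starting at device
 * sector 524288, a zone reported by the device with start 1048576,
 * len 524288 and wp 1048832 is remapped to start 524288 and wp 524544,
 * so that sector 0 of the report always maps to the first sector of
 * the partition.
 */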

/**
 * blkdev_report_zones - Get zone information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @zones: Array of zone structures in which to return the zone information
 * @nr_zones: Number of zone structures in the zone array
 *
 * Description:
 *    Get zone information starting from the zone containing @sector.
 *    The number of zones reported may be less than the number requested
 *    by @nr_zones. The number of zones actually reported is returned
 *    in @nr_zones.
 *    The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function (zone array and command
 *    buffer allocation by the device driver).
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct gendisk *disk = bdev->bd_disk;
	unsigned int i, nrz;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	/*
	 * A block device that advertised itself as zoned must have a
	 * report_zones method. If it does not have one defined, the device
	 * driver has a bug. So warn about that.
	 */
	if (WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
		*nr_zones = 0;
		return 0;
	}

	nrz = min(*nr_zones,
		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
	ret = disk->fops->report_zones(disk, get_start_sect(bdev) + sector,
				       zones, &nrz);
	if (ret)
		return ret;

	for (i = 0; i < nrz; i++) {
		if (!blkdev_report_zone(bdev, zones))
			break;
		zones++;
	}

	*nr_zones = i;

	return 0;
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
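
/*
 * Caller-side sketch (assumed context, not part of this file): a caller
 * that must not recurse into the block layer through memory reclaim
 * wraps the call as the kernel-doc above requires:
 *
 *	unsigned int noio_flag = memalloc_noio_save();
 *	ret = blkdev_report_zones(bdev, sector, zones, &nr_zones);
 *	memalloc_noio_restore(noio_flag);
 *
 * blk_revalidate_disk_zones() below uses exactly this pattern.
 */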

static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sectors)
{
	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
		return false;

	if (sector || nr_sectors != part_nr_sects_read(bdev->bd_part))
		return false;
	/*
	 * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
	 * the entire disk, that is, if the block device's start offset is 0
	 * and its capacity is the same as that of the entire disk.
	 */
	return get_start_sect(bdev) == 0 &&
		part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
}
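
/*
 * Example: a reset of sector 0..capacity on a whole-disk block device
 * (start sector 0) can be turned into a single REQ_OP_ZONE_RESET_ALL
 * command. For a partition starting at a non-zero sector this is never
 * allowed, since one RESET_ALL command would also reset zones outside
 * the partition.
 */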

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors. Must be at least the length of one zone
 *		and must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones; this is useful for applications like mkfs.
		 */
		if (op == REQ_OP_ZONE_RESET &&
		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
			break;
		}

		bio->bi_opf = op;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
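
/*
 * Usage sketch (hypothetical caller): a filesystem resetting the write
 * pointer of a single sequential zone before reusing it could do:
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
 *			       zone->start, zone->len, GFP_NOFS);
 *
 * Opening, closing or finishing zones only differs in @op
 * (REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE, REQ_OP_ZONE_FINISH).
 */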

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_report rep;
	struct blk_zone *zones;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);

	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
			       GFP_KERNEL | __GFP_ZERO);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
	if (ret)
		goto out;

	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
		ret = -EFAULT;
		goto out;
	}

	if (rep.nr_zones) {
		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
				 sizeof(struct blk_zone) * rep.nr_zones))
			ret = -EFAULT;
	}

 out:
	kvfree(zones);

	return ret;
}
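
/*
 * Userspace view (sketch, assumed application code): the ioctl argument
 * is a struct blk_zone_report immediately followed by the zone array:
 *
 *	struct blk_zone_report *rep =
 *		malloc(sizeof(*rep) + n * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = n;
 *	ioctl(fd, BLKREPORTZONE, rep);
 *	// rep->nr_zones now holds the number of zones actually reported
 */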

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
				GFP_KERNEL);
}
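
/*
 * Userspace view (sketch, assumed application code): all four commands
 * take a struct blk_zone_range, e.g. to reset one zone:
 *
 *	struct blk_zone_range zr = {
 *		.sector = zone_start,
 *		.nr_sectors = zone_len,
 *	};
 *	ioctl(fd, BLKRESETZONE, &zr);
 *
 * The file descriptor must be open for writing (see the FMODE_WRITE
 * check above).
 */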

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

/*
 * Allocate an array of struct blk_zone to get nr_zones zone information.
 * The allocated array may be smaller than nr_zones.
 */
static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
{
	struct blk_zone *zones;
	size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);

	/*
	 * GFP_KERNEL here is meaningless as the caller task context has
	 * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
	 * with memalloc_noio_save().
	 */
	zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		*nr_zones = 0;
		return NULL;
	}

	*nr_zones = nrz;

	return zones;
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static bool blk_zone_valid(struct gendisk *disk, struct blk_zone *zone,
			   sector_t *sector)
{
	struct request_queue *q = disk->queue;
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of an
	 * eventual smaller last zone.
	 */
	if (zone->start + zone_sectors < capacity &&
	    zone->len != zone_sectors) {
		pr_warn("%s: Invalid zoned device with non constant zone size\n",
			disk->disk_name);
		return false;
	}

	if (zone->start + zone->len >= capacity &&
	    zone->len > zone_sectors) {
		pr_warn("%s: Invalid zoned device with larger last zone size\n",
			disk->disk_name);
		return false;
	}

	/* Check for holes in the zone report */
	if (zone->start != *sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, *sector, zone->start);
		return false;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return false;
	}

	*sector += zone->len;

	return true;
}
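
/*
 * Example of a layout this accepts: 524288-sector zones starting at
 * sectors 0, 524288, 1048576, ..., with at most a final zone shorter
 * than 524288 sectors. Any gap between zones, an oversized last zone,
 * or an unknown zone type makes revalidation fail with -ENODEV.
 */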

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and
 * initialize a disk request queue's zone bitmaps. This function should
 * normally be called within the disk ->revalidate method. For BIO based
 * queues, no zone bitmap is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
	struct blk_zone *zones = NULL;
	unsigned int noio_flag;
	sector_t sector = 0;
	int ret = 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_mq(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	/*
	 * Ensure that all memory allocations in this context are done as
	 * if GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();

	if (!nr_zones)
		goto update;

	/* Allocate bitmaps */
	ret = -ENOMEM;
	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_wlock)
		goto out;
	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_bitmap)
		goto out;

	/*
	 * Get zone information to check the zones and initialize
	 * seq_zones_bitmap.
	 */
	rep_nr_zones = nr_zones;
	zones = blk_alloc_zones(&rep_nr_zones);
	if (!zones)
		goto out;

	while (z < nr_zones) {
		nrz = min(nr_zones - z, rep_nr_zones);
		ret = disk->fops->report_zones(disk, sector, zones, &nrz);
		if (ret)
			goto out;
		if (!nrz)
			break;
		for (i = 0; i < nrz; i++) {
			if (!blk_zone_valid(disk, &zones[i], &sector)) {
				ret = -ENODEV;
				goto out;
			}
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(z, seq_zones_bitmap);
			z++;
		}
	}

	if (WARN_ON(z != nr_zones)) {
		ret = -EIO;
		goto out;
	}

update:
	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, seq_zones_wlock);
	swap(q->seq_zones_bitmap, seq_zones_bitmap);
	blk_mq_unfreeze_queue(q);

out:
	memalloc_noio_restore(noio_flag);

	kvfree(zones);
	kfree(seq_zones_wlock);
	kfree(seq_zones_bitmap);

	if (ret) {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_mq_freeze_queue(q);
		blk_queue_free_zone_bitmaps(q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
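
/*
 * Driver-side sketch (assumed driver context, not part of this file):
 * a zoned device driver typically sets up the zone model and zone size
 * before revalidating, e.g.:
 *
 *	q->limits.zoned = BLK_ZONED_HM;
 *	blk_queue_chunk_sectors(q, zone_sectors);
 *	ret = blk_revalidate_disk_zones(disk);
 *
 * so that q->nr_zones and the zone bitmaps match the device geometry.
 */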