// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned"

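/* Minimum number of BIOs reserved in the target's bio_set (see dmz_ctr()) */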
#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_dev		*dev;
	struct dm_zone		*zone;
	struct bio		*bio;
	refcount_t		ref;
};

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	refcount_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		**ddev;
	unsigned int		nr_ddevs;

	unsigned int		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata	*metadata;

	/* For chunk work */
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;
	struct mutex		chunk_lock;

	/* For cloned BIOs to zones */
	struct bio_set		bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};

/*
 * Flush interval: 10 seconds, expressed in jiffies.
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;
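	/* Any failure on a backing device schedules a health check of it */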
	if (bioctx->dev && bio->bi_status != BLK_STS_OK)
		bioctx->dev->flags |= DMZ_CHECK_BDEV;

	if (refcount_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_dev *dev = zone->dev;
	struct bio *clone;

	if (dev->flags & DMZ_BDEV_DYING)
		return -EIO;

	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, dev->bdev);
	bioctx->dev = dev;
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

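	/* The clone holds a context reference until dmz_clone_endio() runs */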
	refcount_inc(&bioctx->ref);
	submit_bio_noacct(clone);

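	/* Writing to a sequential zone advances the software write pointer */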
	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}

/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/*
	 * Temporarily limit the BIO size to the nr_blocks to clear so that
	 * zero_fill_bio() does not touch the rest of the BIO, then restore
	 * the size and advance past the zeroed blocks.
	 */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* Reads into unmapped chunks need only zero the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" :
		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
		    chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(zmd, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(zmd, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks,
					  end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio,
					     chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid block: zero out the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (IS_ERR(bzone))
		return PTR_ERR(bzone);

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" :
		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
	    chunk_block == zone->wp_block) {
		/*
		 * zone is a random zone or it is a sequential zone
		 * and the BIO is aligned to the zone write pointer:
		 * direct write the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio,
					       chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(zmd, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		dmz_metadata_label(dmz->metadata),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
	    chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	dmz_lock_metadata(zmd);

	/*
	 * Get the data zone mapping the chunk. There may be no
	 * mapping for read and discard. If a mapping is obtained,
	 * the zone returned will be set to active state.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Process the BIO */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
		dmz_reclaim_bio_acc(zone->dev->reclaim);
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		DMERR("(%s): Unsupported BIO operation 0x%x",
		      dmz_metadata_label(dmz->metadata), bio_op(bio));
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This will check that the mapping
	 * is still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk work reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
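		/* dmz_handle_bio() may block on I/O: do not hold chunk_lock across it */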
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Queueing the work incremented the work refcount */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);
	if (ret)
		DMDEBUG("(%s): Metadata flush failed, rc=%d",
			dmz_metadata_label(dmz->metadata), ret);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
	struct dm_chunk_work *cw;
	int ret = 0;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (cw) {
		dmz_get_chunk_work(cw);
	} else {
		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (unlikely(!cw)) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_WORK(&cw->work, dmz_chunk_work);
		refcount_set(&cw->refcount, 1);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);

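	/* If the work was not already pending, take an extra reference for it */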
	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
	return ret;
}

/*
 * Check if the backing device is being removed. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
	if (dmz_dev->flags & DMZ_BDEV_DYING)
		return true;

	if (dmz_dev->flags & DMZ_CHECK_BDEV)
		return !dmz_check_bdev(dmz_dev);

	if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
		dmz_dev_warn(dmz_dev, "Backing device queue dying");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return dmz_dev->flags & DMZ_BDEV_DYING;
}

/*
 * Check the backing device availability. This detects such events as
 * backing device going offline due to errors, media removals, etc.
 * This check is less efficient than dmz_bdev_is_dying() and should
 * only be performed as a part of error handling.
 */
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
{
	struct gendisk *disk;

	dmz_dev->flags &= ~DMZ_CHECK_BDEV;

	if (dmz_bdev_is_dying(dmz_dev))
		return false;

	disk = dmz_dev->bdev->bd_disk;
	if (disk->fops->check_events &&
	    disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
		dmz_dev_warn(dmz_dev, "Backing device offline");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return !(dmz_dev->flags & DMZ_BDEV_DYING);
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_metadata *zmd = dmz->metadata;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return DM_MAPIO_KILL;

	DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		bio_op(bio), (unsigned long long)sector, nr_sectors,
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
		(unsigned int)dmz_bio_blocks(bio));

	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->dev = NULL;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	refcount_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
	if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
		dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);

	/* Now ready to handle this BIO */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret) {
		DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
			dmz_metadata_label(zmd),
			bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
			ret);
		return DM_MAPIO_REQUEUE;
	}

	return DM_MAPIO_SUBMITTED;
}

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path,
				int idx, int nr_devs)
{
	struct dmz_target *dmz = ti->private;
	struct dm_dev *ddev;
	struct dmz_dev *dev;
	int ret;
	struct block_device *bdev;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
	if (ret) {
		ti->error = "Get target device failed";
		return ret;
	}

	bdev = ddev->bdev;
	if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
		if (nr_devs == 1) {
			ti->error = "Invalid regular device";
			goto err;
		}
		if (idx != 0) {
			ti->error = "First device must be a regular device";
			goto err;
		}
		if (dmz->ddev[0]) {
			ti->error = "Too many regular devices";
			goto err;
		}
		dev = &dmz->dev[idx];
		dev->flags = DMZ_BDEV_REGULAR;
	} else {
		if (dmz->ddev[idx]) {
			ti->error = "Too many zoned devices";
			goto err;
		}
		if (nr_devs > 1 && idx == 0) {
			ti->error = "First device must be a regular device";
			goto err;
		}
		dev = &dmz->dev[idx];
	}
	dev->bdev = bdev;
	dev->dev_idx = idx;
	(void)bdevname(dev->bdev, dev->name);

	dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	if (ti->begin) {
		ti->error = "Partial mapping is not supported";
		goto err;
	}

	dmz->ddev[idx] = ddev;

	return 0;
err:
	dm_put_device(ti, ddev);
	return -EINVAL;
}

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	for (i = 0; i < dmz->nr_ddevs; i++) {
		if (dmz->ddev[i]) {
			dm_put_device(ti, dmz->ddev[i]);
			dmz->ddev[i] = NULL;
		}
	}
}

static int dmz_fixup_devices(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *reg_dev, *zoned_dev;
	struct request_queue *q;
	sector_t zone_nr_sectors = 0;
	int i;

	/*
	 * When we have more than one device, the first one must be a
	 * regular block device and the others zoned block devices.
	 */
	if (dmz->nr_ddevs > 1) {
		reg_dev = &dmz->dev[0];
		if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
			ti->error = "Primary disk is not a regular device";
			return -EINVAL;
		}
		for (i = 1; i < dmz->nr_ddevs; i++) {
			zoned_dev = &dmz->dev[i];
			if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
				ti->error = "Secondary disk is not a zoned device";
				return -EINVAL;
			}
			q = bdev_get_queue(zoned_dev->bdev);
			if (zone_nr_sectors &&
			    zone_nr_sectors != blk_queue_zone_sectors(q)) {
				ti->error = "Zone nr sectors mismatch";
				return -EINVAL;
			}
			zone_nr_sectors = blk_queue_zone_sectors(q);
			zoned_dev->zone_nr_sectors = zone_nr_sectors;
			zoned_dev->nr_zones =
				blkdev_nr_zones(zoned_dev->bdev->bd_disk);
		}
	} else {
		reg_dev = NULL;
		zoned_dev = &dmz->dev[0];
		if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
			ti->error = "Disk is not a zoned device";
			return -EINVAL;
		}
		q = bdev_get_queue(zoned_dev->bdev);
		zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
		zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
	}

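	/*
	 * The regular device is split into emulated zones and its zones come
	 * first; each following zoned device starts at the running offset in
	 * the global zone numbering.
	 */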
	if (reg_dev) {
		sector_t zone_offset;

		reg_dev->zone_nr_sectors = zone_nr_sectors;
		reg_dev->nr_zones =
			DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
					      reg_dev->zone_nr_sectors);
		reg_dev->zone_offset = 0;
		zone_offset = reg_dev->nr_zones;
		for (i = 1; i < dmz->nr_ddevs; i++) {
			dmz->dev[i].zone_offset = zone_offset;
			zone_offset += dmz->dev[i].nr_zones;
		}
	}
	return 0;
}

/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dmz_target *dmz;
	int ret, i;

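	/*
	 * Arguments are one or more backing device paths. An illustrative
	 * (not authoritative) dmsetup table line for a single zoned device:
	 *   "0 <nr-sectors> zoned /dev/<zoned-dev>"
	 */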
	/* Check arguments */
	if (argc < 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Allocate and initialize the target descriptor */
	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
	if (!dmz) {
		ti->error = "Unable to allocate the zoned target descriptor";
		return -ENOMEM;
	}
	dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
	if (!dmz->dev) {
		ti->error = "Unable to allocate the zoned device descriptors";
		kfree(dmz);
		return -ENOMEM;
	}
	dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
	if (!dmz->ddev) {
		ti->error = "Unable to allocate the dm device descriptors";
		ret = -ENOMEM;
		goto err;
	}
	dmz->nr_ddevs = argc;

	ti->private = dmz;

	/* Get the target zoned block devices */
	for (i = 0; i < argc; i++) {
		ret = dmz_get_zoned_device(ti, argv[i], i, argc);
		if (ret)
			goto err_dev;
	}
	ret = dmz_fixup_devices(ti);
	if (ret)
		goto err_dev;

	/* Initialize metadata */
	ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
			       dm_table_device_name(ti->table));
	if (ret) {
		ti->error = "Metadata initialization failed";
		goto err_dev;
	}

	/* Set target (no write same support) */
	ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->per_io_data_size = sizeof(struct dmz_bioctx);
	ti->flush_supported = true;
	ti->discards_supported = true;

	/* The exposed capacity is the number of chunks that can be mapped */
	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
		dmz_zone_nr_sectors_shift(dmz->metadata);

	/* Zone BIO */
	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
	if (ret) {
		ti->error = "Create BIO set failed";
		goto err_meta;
	}

	/* Chunk BIO work */
	mutex_init(&dmz->chunk_lock);
	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
					dmz_metadata_label(dmz->metadata));
	if (!dmz->chunk_wq) {
		ti->error = "Create chunk workqueue failed";
		ret = -ENOMEM;
		goto err_bio;
	}

	/* Flush work */
	spin_lock_init(&dmz->flush_lock);
	bio_list_init(&dmz->flush_list);
	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
						dmz_metadata_label(dmz->metadata));
	if (!dmz->flush_wq) {
		ti->error = "Create flush workqueue failed";
		ret = -ENOMEM;
		goto err_cwq;
	}
	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

	/* Initialize reclaim */
	for (i = 0; i < dmz->nr_ddevs; i++) {
		ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
		if (ret) {
			ti->error = "Zone reclaim initialization failed";
			goto err_fwq;
		}
	}

	DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
	       dmz_metadata_label(dmz->metadata),
	       (unsigned long long)ti->len,
	       (unsigned long long)dmz_sect2blk(ti->len));

	return 0;
err_fwq:
	destroy_workqueue(dmz->flush_wq);
err_cwq:
	destroy_workqueue(dmz->chunk_wq);
err_bio:
	mutex_destroy(&dmz->chunk_lock);
	bioset_exit(&dmz->bio_set);
err_meta:
	dmz_dtr_metadata(dmz->metadata);
err_dev:
	dmz_put_zoned_device(ti);
err:
	kfree(dmz->dev);
	kfree(dmz);

	return ret;
}

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	flush_workqueue(dmz->chunk_wq);
	destroy_workqueue(dmz->chunk_wq);

	for (i = 0; i < dmz->nr_ddevs; i++)
		dmz_dtr_reclaim(dmz->dev[i].reclaim);

	cancel_delayed_work_sync(&dmz->flush_work);
	destroy_workqueue(dmz->flush_wq);

	(void) dmz_flush_metadata(dmz->metadata);

	dmz_dtr_metadata(dmz->metadata);

	bioset_exit(&dmz->bio_set);

	dmz_put_zoned_device(ti);

	mutex_destroy(&dmz->chunk_lock);

	kfree(dmz->dev);
	kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dmz_target *dmz = ti->private;
	unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);

	limits->logical_block_size = DMZ_BLOCK_SIZE;
	limits->physical_block_size = DMZ_BLOCK_SIZE;

	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

	limits->discard_alignment = DMZ_BLOCK_SIZE;
	limits->discard_granularity = DMZ_BLOCK_SIZE;
	limits->max_discard_sectors = chunk_sectors;
	limits->max_hw_discard_sectors = chunk_sectors;
	limits->max_write_zeroes_sectors = chunk_sectors;

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = chunk_sectors;
	limits->max_sectors = chunk_sectors;

	/* We are exposing a drive-managed zoned block device */
	limits->zoned = BLK_ZONED_NONE;
}

/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
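	/* ioctls are always forwarded to the first (primary) backing device */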
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = &dmz->dev[0];

	if (!dmz_check_bdev(dev))
		return -EIO;

	*bdev = dev->bdev;

	return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	flush_workqueue(dmz->chunk_wq);
	for (i = 0; i < dmz->nr_ddevs; i++)
		dmz_suspend_reclaim(dmz->dev[i].reclaim);
	cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
	for (i = 0; i < dmz->nr_ddevs; i++)
		dmz_resume_reclaim(dmz->dev[i].reclaim);
}

static int dmz_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct dmz_target *dmz = ti->private;
	unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
	sector_t capacity;
	int i, r;

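	/* Report only the zone-aligned part of each device's capacity */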
	for (i = 0; i < dmz->nr_ddevs; i++) {
		capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
		r = fn(ti, dmz->ddev[i], 0, capacity, data);
		if (r)
			break;
	}
	return r;
}

static void dmz_status(struct dm_target *ti, status_type_t type,
		       unsigned int status_flags, char *result,
		       unsigned int maxlen)
{
	struct dmz_target *dmz = ti->private;
	ssize_t sz = 0;
	char buf[BDEVNAME_SIZE];
	struct dmz_dev *dev;
	int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u zones %u/%u cache",
		       dmz_nr_zones(dmz->metadata),
		       dmz_nr_unmap_cache_zones(dmz->metadata),
		       dmz_nr_cache_zones(dmz->metadata));
		for (i = 0; i < dmz->nr_ddevs; i++) {
			/*
			 * For a multi-device setup the first device
			 * contains only cache zones.
			 */
			if ((i == 0) &&
			    (dmz_nr_cache_zones(dmz->metadata) > 0))
				continue;
			DMEMIT(" %u/%u random %u/%u sequential",
			       dmz_nr_unmap_rnd_zones(dmz->metadata, i),
			       dmz_nr_rnd_zones(dmz->metadata, i),
			       dmz_nr_unmap_seq_zones(dmz->metadata, i),
			       dmz_nr_seq_zones(dmz->metadata, i));
		}
		break;
	case STATUSTYPE_TABLE:
		dev = &dmz->dev[0];
		format_dev_t(buf, dev->bdev->bd_dev);
		DMEMIT("%s", buf);
		for (i = 1; i < dmz->nr_ddevs; i++) {
			dev = &dmz->dev[i];
			format_dev_t(buf, dev->bdev->bd_dev);
			DMEMIT(" %s", buf);
		}
		break;
	}
	return;
}

static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
		       char *result, unsigned int maxlen)
{
	struct dmz_target *dmz = ti->private;
	int r = -EINVAL;

	if (!strcasecmp(argv[0], "reclaim")) {
		int i;

		for (i = 0; i < dmz->nr_ddevs; i++)
			dmz_schedule_reclaim(dmz->dev[i].reclaim);
		r = 0;
	} else
		DMERR("unrecognized message %s", argv[0]);
	return r;
}

static struct target_type dmz_type = {
	.name		 = "zoned",
	.version	 = {2, 0, 0},
	.features	 = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
	.module		 = THIS_MODULE,
	.ctr		 = dmz_ctr,
	.dtr		 = dmz_dtr,
	.map		 = dmz_map,
	.io_hints	 = dmz_io_hints,
	.prepare_ioctl	 = dmz_prepare_ioctl,
	.postsuspend	 = dmz_suspend,
	.resume		 = dmz_resume,
	.iterate_devices = dmz_iterate_devices,
	.status		 = dmz_status,
	.message	 = dmz_message,
};

static int __init dmz_init(void)
{
	return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
	dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");