// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned"

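/* Number of BIOs reserved in the bio_set used for BIO clones */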
#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_dev		*dev;
	struct dm_zone		*zone;
	struct bio		*bio;
	refcount_t		ref;
};

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	refcount_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		*ddev;

	unsigned long		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata	*metadata;

	/* For reclaim */
	struct dmz_reclaim	*reclaim;

	/* For chunk work */
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;
	struct mutex		chunk_lock;

	/* For cloned BIOs to zones */
	struct bio_set		bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};

/*
 * Flush intervals (seconds).
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

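	/* Record the first error and flag the backing device for a health check */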
	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;
	if (bio->bi_status != BLK_STS_OK)
		bioctx->dev->flags |= DMZ_CHECK_BDEV;

	if (refcount_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_dev *dev = dmz_zone_to_dev(dmz->metadata, zone);
	struct bio *clone;

	if (dev->flags & DMZ_BDEV_DYING)
		return -EIO;

	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, dev->bdev);
	bioctx->dev = dev;
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

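	/*
	 * Advance the original BIO past the cloned range so the caller can
	 * keep submitting clones for the remaining blocks.
	 */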
	bio_advance(bio, clone->bi_iter.bi_size);

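	/* The clone holds a reference on the BIO context, dropped in dmz_clone_endio() */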
	refcount_inc(&bioctx->ref);
	generic_make_request(clone);

	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}

/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/* Clear nr_blocks */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* Reads into unmapped chunks need only zero the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" : "SEQ"),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(zmd, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(zmd, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks,
					  end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio,
					     chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid blocks: zero out the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (IS_ERR(bzone))
		return PTR_ERR(bzone);

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" : "SEQ"),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
		/*
		 * zone is a random zone or it is a sequential zone
		 * and the BIO is aligned to the zone write pointer:
		 * direct write the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio,
					       chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(zmd, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		dmz_metadata_label(dmz->metadata),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	/*
	 * Write may trigger a zone allocation. So make sure the
	 * allocation can succeed.
	 */
	if (bio_op(bio) == REQ_OP_WRITE)
		dmz_schedule_reclaim(dmz->reclaim);

	dmz_lock_metadata(zmd);

	/*
	 * Get the data zone mapping the chunk. There may be no
	 * mapping for read and discard. If a mapping is obtained,
	 * the zone returned will be set to active state.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Process the BIO */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		DMERR("(%s): Unsupported BIO operation 0x%x",
		      dmz_metadata_label(dmz->metadata), bio_op(bio));
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This will check that the mapping
	 * is still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Queueing the work incremented the work refcount */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);
	if (ret)
		DMDEBUG("(%s): Metadata flush failed, rc=%d\n",
			dmz_metadata_label(dmz->metadata), ret);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
	struct dm_chunk_work *cw;
	int ret = 0;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (cw) {
		dmz_get_chunk_work(cw);
	} else {
		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (unlikely(!cw)) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_WORK(&cw->work, dmz_chunk_work);
		refcount_set(&cw->refcount, 1);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);

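	/* Account the BIO for reclaim and make sure the chunk work is queued */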
	dmz_reclaim_bio_acc(dmz->reclaim);
	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
	return ret;
}

/*
 * Check if the backing device is being removed. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
	if (dmz_dev->flags & DMZ_BDEV_DYING)
		return true;

	if (dmz_dev->flags & DMZ_CHECK_BDEV)
		return !dmz_check_bdev(dmz_dev);

	if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
		dmz_dev_warn(dmz_dev, "Backing device queue dying");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return dmz_dev->flags & DMZ_BDEV_DYING;
}

/*
 * Check the backing device availability. This detects such events as
 * backing device going offline due to errors, media removals, etc.
 * This check is less efficient than dmz_bdev_is_dying() and should
 * only be performed as a part of error handling.
 */
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
{
	struct gendisk *disk;

	dmz_dev->flags &= ~DMZ_CHECK_BDEV;

	if (dmz_bdev_is_dying(dmz_dev))
		return false;

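	/* Check for a media change, which indicates that the backing device is gone */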
	disk = dmz_dev->bdev->bd_disk;
	if (disk->fops->check_events &&
	    disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
		dmz_dev_warn(dmz_dev, "Backing device offline");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return !(dmz_dev->flags & DMZ_BDEV_DYING);
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_metadata *zmd = dmz->metadata;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return DM_MAPIO_KILL;

	DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		bio_op(bio), (unsigned long long)sector, nr_sectors,
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
		(unsigned int)dmz_bio_blocks(bio));

	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->dev = NULL;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	refcount_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
	if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
		dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);

	/* Now ready to handle this BIO */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret) {
		DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i\n",
			dmz_metadata_label(zmd),
			bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
			ret);
		return DM_MAPIO_REQUEUE;
	}

	return DM_MAPIO_SUBMITTED;
}

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
{
	struct dmz_target *dmz = ti->private;
	struct request_queue *q;
	struct dmz_dev *dev;
	sector_t aligned_capacity;
	int ret;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
	if (ret) {
		ti->error = "Get target device failed";
		dmz->ddev = NULL;
		return ret;
	}

	dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err;
	}

	dev->bdev = dmz->ddev->bdev;
	(void)bdevname(dev->bdev, dev->name);

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
		ti->error = "Not a zoned block device";
		ret = -EINVAL;
		goto err;
	}

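	/*
	 * The target must map either the whole device or its zone-aligned
	 * capacity; partial mappings are not supported.
	 */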
	q = bdev_get_queue(dev->bdev);
	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
	aligned_capacity = dev->capacity &
		~((sector_t)blk_queue_zone_sectors(q) - 1);
	if (ti->begin ||
	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
		ti->error = "Partial mapping not supported";
		ret = -EINVAL;
		goto err;
	}

	dev->zone_nr_sectors = blk_queue_zone_sectors(q);

	dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);

	dmz->dev = dev;

	return 0;
err:
	dm_put_device(ti, dmz->ddev);
	kfree(dev);

	return ret;
}

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	dm_put_device(ti, dmz->ddev);
	kfree(dmz->dev);
	dmz->dev = NULL;
}

/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dmz_target *dmz;
	struct dmz_dev *dev;
	int ret;

	/* Check arguments */
	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Allocate and initialize the target descriptor */
	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
	if (!dmz) {
		ti->error = "Unable to allocate the zoned target descriptor";
		return -ENOMEM;
	}
	ti->private = dmz;

	/* Get the target zoned block device */
	ret = dmz_get_zoned_device(ti, argv[0]);
	if (ret) {
		dmz->ddev = NULL;
		goto err;
	}

	/* Initialize metadata */
	dev = dmz->dev;
	ret = dmz_ctr_metadata(dev, &dmz->metadata,
			       dm_table_device_name(ti->table));
	if (ret) {
		ti->error = "Metadata initialization failed";
		goto err_dev;
	}

	/* Set target (no write same support) */
	ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->per_io_data_size = sizeof(struct dmz_bioctx);
	ti->flush_supported = true;
	ti->discards_supported = true;

	/* The exposed capacity is the number of chunks that can be mapped */
	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
		dmz_zone_nr_sectors_shift(dmz->metadata);

	/* Zone BIO */
	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
	if (ret) {
		ti->error = "Create BIO set failed";
		goto err_meta;
	}

	/* Chunk BIO work */
	mutex_init(&dmz->chunk_lock);
	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
					dmz_metadata_label(dmz->metadata));
	if (!dmz->chunk_wq) {
		ti->error = "Create chunk workqueue failed";
		ret = -ENOMEM;
		goto err_bio;
	}

	/* Flush work */
	spin_lock_init(&dmz->flush_lock);
	bio_list_init(&dmz->flush_list);
	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
						dmz_metadata_label(dmz->metadata));
	if (!dmz->flush_wq) {
		ti->error = "Create flush workqueue failed";
		ret = -ENOMEM;
		goto err_cwq;
	}
	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

	/* Initialize reclaim */
	ret = dmz_ctr_reclaim(dmz->metadata, &dmz->reclaim);
	if (ret) {
		ti->error = "Zone reclaim initialization failed";
		goto err_fwq;
	}

	DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
	       dmz_metadata_label(dmz->metadata),
	       (unsigned long long)ti->len,
	       (unsigned long long)dmz_sect2blk(ti->len));

	return 0;
err_fwq:
	destroy_workqueue(dmz->flush_wq);
err_cwq:
	destroy_workqueue(dmz->chunk_wq);
err_bio:
	mutex_destroy(&dmz->chunk_lock);
	bioset_exit(&dmz->bio_set);
err_meta:
	dmz_dtr_metadata(dmz->metadata);
err_dev:
	dmz_put_zoned_device(ti);
err:
	kfree(dmz);

	return ret;
}

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

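	/* Wait for all queued chunk works to complete before tearing down */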
	flush_workqueue(dmz->chunk_wq);
	destroy_workqueue(dmz->chunk_wq);

	dmz_dtr_reclaim(dmz->reclaim);

	cancel_delayed_work_sync(&dmz->flush_work);
	destroy_workqueue(dmz->flush_wq);

	(void) dmz_flush_metadata(dmz->metadata);

	dmz_dtr_metadata(dmz->metadata);

	bioset_exit(&dmz->bio_set);

	dmz_put_zoned_device(ti);

	mutex_destroy(&dmz->chunk_lock);

	kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dmz_target *dmz = ti->private;
	unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);

	limits->logical_block_size = DMZ_BLOCK_SIZE;
	limits->physical_block_size = DMZ_BLOCK_SIZE;

	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

	limits->discard_alignment = DMZ_BLOCK_SIZE;
	limits->discard_granularity = DMZ_BLOCK_SIZE;
	limits->max_discard_sectors = chunk_sectors;
	limits->max_hw_discard_sectors = chunk_sectors;
	limits->max_write_zeroes_sectors = chunk_sectors;

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = chunk_sectors;
	limits->max_sectors = chunk_sectors;

	/* We are exposing a drive-managed zoned block device */
	limits->zoned = BLK_ZONED_NONE;
}

/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = &dmz->dev[0];

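	/* Do not pass ioctls through if the backing device is not usable */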
	if (!dmz_check_bdev(dev))
		return -EIO;

	*bdev = dev->bdev;

	return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	dmz_suspend_reclaim(dmz->reclaim);
	cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
	dmz_resume_reclaim(dmz->reclaim);
}

static int dmz_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	sector_t capacity = dev->capacity & ~(dmz_zone_nr_sectors(dmz->metadata) - 1);

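	/* Report only the zone-aligned part of the backing device capacity */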
	return fn(ti, dmz->ddev, 0, capacity, data);
}

static void dmz_status(struct dm_target *ti, status_type_t type,
		       unsigned int status_flags, char *result,
		       unsigned int maxlen)
{
	struct dmz_target *dmz = ti->private;
	ssize_t sz = 0;
	char buf[BDEVNAME_SIZE];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u zones %u/%u random %u/%u sequential",
		       dmz_nr_zones(dmz->metadata),
		       dmz_nr_unmap_rnd_zones(dmz->metadata),
		       dmz_nr_rnd_zones(dmz->metadata),
		       dmz_nr_unmap_seq_zones(dmz->metadata),
		       dmz_nr_seq_zones(dmz->metadata));
		break;
	case STATUSTYPE_TABLE:
		format_dev_t(buf, dmz->dev->bdev->bd_dev);
		DMEMIT("%s", buf);
		break;
	}
	return;
}

static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
		       char *result, unsigned int maxlen)
{
	struct dmz_target *dmz = ti->private;
	int r = -EINVAL;

	if (!strcasecmp(argv[0], "reclaim")) {
		dmz_schedule_reclaim(dmz->reclaim);
		r = 0;
	} else
		DMERR("unrecognized message %s", argv[0]);
	return r;
}

static struct target_type dmz_type = {
	.name		 = "zoned",
	.version	 = {1, 1, 0},
	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
	.module		 = THIS_MODULE,
	.ctr		 = dmz_ctr,
	.dtr		 = dmz_dtr,
	.map		 = dmz_map,
	.io_hints	 = dmz_io_hints,
	.prepare_ioctl	 = dmz_prepare_ioctl,
	.postsuspend	 = dmz_suspend,
	.resume		 = dmz_resume,
	.iterate_devices = dmz_iterate_devices,
	.status		 = dmz_status,
	.message	 = dmz_message,
};

static int __init dmz_init(void)
{
	return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
	dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");