/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */
7
8#ifndef DM_ZONED_H
9#define DM_ZONED_H
10
11#include <linux/types.h>
12#include <linux/blkdev.h>
13#include <linux/device-mapper.h>
14#include <linux/dm-kcopyd.h>
15#include <linux/list.h>
16#include <linux/spinlock.h>
17#include <linux/mutex.h>
18#include <linux/workqueue.h>
19#include <linux/rwsem.h>
20#include <linux/rbtree.h>
21#include <linux/radix-tree.h>
22#include <linux/shrinker.h>
23
24/*
25 * dm-zoned creates block devices with 4KB blocks, always.
26 */
27#define DMZ_BLOCK_SHIFT 12
28#define DMZ_BLOCK_SIZE (1 << DMZ_BLOCK_SHIFT)
29#define DMZ_BLOCK_MASK (DMZ_BLOCK_SIZE - 1)
30
31#define DMZ_BLOCK_SHIFT_BITS (DMZ_BLOCK_SHIFT + 3)
32#define DMZ_BLOCK_SIZE_BITS (1 << DMZ_BLOCK_SHIFT_BITS)
33#define DMZ_BLOCK_MASK_BITS (DMZ_BLOCK_SIZE_BITS - 1)
34
35#define DMZ_BLOCK_SECTORS_SHIFT (DMZ_BLOCK_SHIFT - SECTOR_SHIFT)
36#define DMZ_BLOCK_SECTORS (DMZ_BLOCK_SIZE >> SECTOR_SHIFT)
37#define DMZ_BLOCK_SECTORS_MASK (DMZ_BLOCK_SECTORS - 1)
38
39/*
40 * 4KB block <-> 512B sector conversion.
41 */
42#define dmz_blk2sect(b) ((sector_t)(b) << DMZ_BLOCK_SECTORS_SHIFT)
43#define dmz_sect2blk(s) ((sector_t)(s) >> DMZ_BLOCK_SECTORS_SHIFT)
44
45#define dmz_bio_block(bio) dmz_sect2blk((bio)->bi_iter.bi_sector)
46#define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
47
Hannes Reinecke18979812020-06-02 13:09:49 +020048struct dmz_metadata;
49struct dmz_reclaim;
50
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090051/*
52 * Zoned block device information.
53 */
54struct dmz_dev {
55 struct block_device *bdev;
Hannes Reinecke18979812020-06-02 13:09:49 +020056 struct dmz_metadata *metadata;
Hannes Reineckef97809a2020-06-02 13:09:50 +020057 struct dmz_reclaim *reclaim;
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090058
59 char name[BDEVNAME_SIZE];
Hannes Reineckebd5c4032020-05-11 10:24:30 +020060 uuid_t uuid;
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090061
62 sector_t capacity;
63
64 unsigned int nr_zones;
Hannes Reineckebd5c4032020-05-11 10:24:30 +020065 unsigned int zone_offset;
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090066
Dmitry Fomichev75d66ff2019-08-10 14:43:11 -070067 unsigned int flags;
68
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090069 sector_t zone_nr_sectors;
Hannes Reineckebd82fda2020-06-02 13:09:51 +020070
71 unsigned int nr_rnd;
72 atomic_t unmap_nr_rnd;
73 struct list_head unmap_rnd_list;
74 struct list_head map_rnd_list;
75
76 unsigned int nr_seq;
77 atomic_t unmap_nr_seq;
78 struct list_head unmap_seq_list;
79 struct list_head map_seq_list;
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090080};
81
Hannes Reinecke36820562020-05-11 10:24:21 +020082#define dmz_bio_chunk(zmd, bio) ((bio)->bi_iter.bi_sector >> \
83 dmz_zone_nr_sectors_shift(zmd))
84#define dmz_chunk_block(zmd, b) ((b) & (dmz_zone_nr_blocks(zmd) - 1))
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090085
Dmitry Fomichev75d66ff2019-08-10 14:43:11 -070086/* Device flags. */
87#define DMZ_BDEV_DYING (1 << 0)
Dmitry Fomicheve7fad902019-11-06 14:34:35 -080088#define DMZ_CHECK_BDEV (2 << 0)
Hannes Reineckebd5c4032020-05-11 10:24:30 +020089#define DMZ_BDEV_REGULAR (4 << 0)
Dmitry Fomichev75d66ff2019-08-10 14:43:11 -070090
Damien Le Moal3b1a94c2017-06-07 15:55:39 +090091/*
92 * Zone descriptor.
93 */
94struct dm_zone {
95 /* For listing the zone depending on its state */
96 struct list_head link;
97
Hannes Reinecke8f222722020-06-02 13:09:48 +020098 /* Device containing this zone */
99 struct dmz_dev *dev;
100
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900101 /* Zone type and state */
102 unsigned long flags;
103
104 /* Zone activation reference count */
105 atomic_t refcount;
106
Hannes Reineckeb7122872020-05-11 10:24:18 +0200107 /* Zone id */
108 unsigned int id;
109
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900110 /* Zone write pointer block (relative to the zone start block) */
111 unsigned int wp_block;
112
113 /* Zone weight (number of valid blocks in the zone) */
114 unsigned int weight;
115
116 /* The chunk that the zone maps */
117 unsigned int chunk;
118
119 /*
120 * For a sequential data zone, pointer to the random zone
121 * used as a buffer for processing unaligned writes.
122 * For a buffer zone, this points back to the data zone.
123 */
124 struct dm_zone *bzone;
125};
126
127/*
128 * Zone flags.
129 */
130enum {
131 /* Zone write type */
Hannes Reinecke34f5aff2020-05-19 10:14:20 +0200132 DMZ_CACHE,
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900133 DMZ_RND,
134 DMZ_SEQ,
135
136 /* Zone critical condition */
137 DMZ_OFFLINE,
138 DMZ_READ_ONLY,
139
140 /* How the zone is being used */
141 DMZ_META,
142 DMZ_DATA,
143 DMZ_BUF,
Hannes Reineckeaec67b42020-06-02 13:09:45 +0200144 DMZ_RESERVED,
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900145
146 /* Zone internal state */
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900147 DMZ_RECLAIM,
148 DMZ_SEQ_WRITE_ERR,
Hannes Reineckea16b7de2020-05-19 10:14:23 +0200149 DMZ_RECLAIM_TERMINATE,
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900150};
151
152/*
153 * Zone data accessors.
154 */
Hannes Reinecke34f5aff2020-05-19 10:14:20 +0200155#define dmz_is_cache(z) test_bit(DMZ_CACHE, &(z)->flags)
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900156#define dmz_is_rnd(z) test_bit(DMZ_RND, &(z)->flags)
157#define dmz_is_seq(z) test_bit(DMZ_SEQ, &(z)->flags)
158#define dmz_is_empty(z) ((z)->wp_block == 0)
159#define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
160#define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900161#define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
Hannes Reineckeaec67b42020-06-02 13:09:45 +0200162#define dmz_is_reserved(z) test_bit(DMZ_RESERVED, &(z)->flags)
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900163#define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
Hannes Reineckea16b7de2020-05-19 10:14:23 +0200164#define dmz_reclaim_should_terminate(z) \
165 test_bit(DMZ_RECLAIM_TERMINATE, &(z)->flags)
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900166
167#define dmz_is_meta(z) test_bit(DMZ_META, &(z)->flags)
168#define dmz_is_buf(z) test_bit(DMZ_BUF, &(z)->flags)
169#define dmz_is_data(z) test_bit(DMZ_DATA, &(z)->flags)
170
171#define dmz_weight(z) ((z)->weight)
172
173/*
174 * Message functions.
175 */
176#define dmz_dev_info(dev, format, args...) \
177 DMINFO("(%s): " format, (dev)->name, ## args)
178
179#define dmz_dev_err(dev, format, args...) \
180 DMERR("(%s): " format, (dev)->name, ## args)
181
182#define dmz_dev_warn(dev, format, args...) \
183 DMWARN("(%s): " format, (dev)->name, ## args)
184
185#define dmz_dev_debug(dev, format, args...) \
186 DMDEBUG("(%s): " format, (dev)->name, ## args)
187
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900188/*
189 * Functions defined in dm-zoned-metadata.c
190 */
Hannes Reineckebd5c4032020-05-11 10:24:30 +0200191int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
192 struct dmz_metadata **zmd, const char *devname);
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900193void dmz_dtr_metadata(struct dmz_metadata *zmd);
194int dmz_resume_metadata(struct dmz_metadata *zmd);
195
196void dmz_lock_map(struct dmz_metadata *zmd);
197void dmz_unlock_map(struct dmz_metadata *zmd);
198void dmz_lock_metadata(struct dmz_metadata *zmd);
199void dmz_unlock_metadata(struct dmz_metadata *zmd);
200void dmz_lock_flush(struct dmz_metadata *zmd);
201void dmz_unlock_flush(struct dmz_metadata *zmd);
202int dmz_flush_metadata(struct dmz_metadata *zmd);
Hannes Reinecke2234e732020-05-11 10:24:22 +0200203const char *dmz_metadata_label(struct dmz_metadata *zmd);
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900204
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900205sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
206sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
207unsigned int dmz_nr_chunks(struct dmz_metadata *zmd);
208
Hannes Reinecked0e21ce2020-05-11 10:24:23 +0200209bool dmz_check_dev(struct dmz_metadata *zmd);
210bool dmz_dev_is_dying(struct dmz_metadata *zmd);
211
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900212#define DMZ_ALLOC_RND 0x01
Hannes Reinecke34f5aff2020-05-19 10:14:20 +0200213#define DMZ_ALLOC_CACHE 0x02
214#define DMZ_ALLOC_SEQ 0x04
215#define DMZ_ALLOC_RECLAIM 0x10
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900216
217struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
218void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
219
220void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
221 unsigned int chunk);
222void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
Hannes Reineckebc3d5712020-05-11 10:24:16 +0200223unsigned int dmz_nr_zones(struct dmz_metadata *zmd);
Hannes Reinecke34f5aff2020-05-19 10:14:20 +0200224unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd);
225unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd);
Hannes Reineckebd82fda2020-06-02 13:09:51 +0200226unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx);
227unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx);
228unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx);
229unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx);
Hannes Reinecke36820562020-05-11 10:24:21 +0200230unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd);
231unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd);
232unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd);
233unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd);
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900234
Damien Le Moal3b8cafd2019-07-16 14:39:34 +0900235/*
236 * Activate a zone (increment its reference count).
237 */
238static inline void dmz_activate_zone(struct dm_zone *zone)
239{
240 atomic_inc(&zone->refcount);
241}
242
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900243int dmz_lock_zone_reclaim(struct dm_zone *zone);
244void dmz_unlock_zone_reclaim(struct dm_zone *zone);
Hannes Reinecke90a9b862020-05-19 10:14:21 +0200245struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, bool idle);
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900246
247struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
248 unsigned int chunk, int op);
249void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
250struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
251 struct dm_zone *dzone);
252
253int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
254 sector_t chunk_block, unsigned int nr_blocks);
255int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
256 sector_t chunk_block, unsigned int nr_blocks);
257int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
258 sector_t chunk_block);
259int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
260 sector_t *chunk_block);
261int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
262 struct dm_zone *to_zone);
263int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
264 struct dm_zone *to_zone, sector_t chunk_block);
265
266/*
267 * Functions defined in dm-zoned-reclaim.c
268 */
Hannes Reineckef97809a2020-06-02 13:09:50 +0200269int dmz_ctr_reclaim(struct dmz_metadata *zmd, struct dmz_reclaim **zrc, int idx);
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900270void dmz_dtr_reclaim(struct dmz_reclaim *zrc);
271void dmz_suspend_reclaim(struct dmz_reclaim *zrc);
272void dmz_resume_reclaim(struct dmz_reclaim *zrc);
273void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
274void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
275
Dmitry Fomichev75d66ff2019-08-10 14:43:11 -0700276/*
277 * Functions defined in dm-zoned-target.c
278 */
279bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
Dmitry Fomicheve7fad902019-11-06 14:34:35 -0800280bool dmz_check_bdev(struct dmz_dev *dmz_dev);
Dmitry Fomichev75d66ff2019-08-10 14:43:11 -0700281
Hannes Reineckef97809a2020-06-02 13:09:50 +0200282/*
283 * Deactivate a zone. This decrement the zone reference counter
284 * indicating that all BIOs to the zone have completed when the count is 0.
285 */
286static inline void dmz_deactivate_zone(struct dm_zone *zone)
287{
288 dmz_reclaim_bio_acc(zone->dev->reclaim);
289 atomic_dec(&zone->refcount);
290}
291
292/*
293 * Test if a zone is active, that is, has a refcount > 0.
294 */
295static inline bool dmz_is_active(struct dm_zone *zone)
296{
297 return atomic_read(&zone->refcount);
298}
299
Damien Le Moal3b1a94c2017-06-07 15:55:39 +0900300#endif /* DM_ZONED_H */