/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS 1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */
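
/*
 * Targets should instead go through the public helpers; a sketch (these
 * are declared in include/linux/device-mapper.h):
 *
 *	struct mapped_device *md = dm_table_get_md(ti->table);
 *	const char *name = dm_device_name(md);
 */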

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;
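
	/*
	 * Read-side access pattern for the live table (a sketch; the
	 * helpers are declared in include/linux/device-mapper.h and the
	 * read lock is the io_barrier SRCU below):
	 *
	 *	int srcu_idx;
	 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	 *
	 *	if (map) {
	 *		... look up a target, remap the bio ...
	 *	}
	 *	dm_put_live_table(md, srcu_idx);
	 */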

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
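
/*
 * The DMF_* values are bit numbers, not masks: they are used with the
 * set_bit()/clear_bit()/test_bit() family on md->flags. The pattern
 * used throughout dm.c is roughly:
 *
 *	if (test_bit(DMF_SUSPENDED, &md->flags))
 *		...
 */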

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}
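
/*
 * DMF_EMULATE_ZONE_APPEND is set by the dm-zone.c code when a zoned
 * mapped device contains a target that cannot handle REQ_OP_ZONE_APPEND
 * natively; appends are then emulated as regular writes, with the
 * per-zone write pointer offsets cached in md->zwp_offset above (see
 * dm-zone.c for the details).
 */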

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};
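
/*
 * The btree fields in struct dm_table above (depth, counts, index,
 * highs) form a flattened n-ary search tree keyed on each target's
 * highest sector. The lookup in dm-table.c:dm_table_find_target() is
 * roughly:
 *
 *	for (l = 0; l < t->depth; l++) {
 *		n = get_child(n, k);
 *		node = get_node(t, l, n);
 *		for (k = 0; k < KEYS_PER_NODE; k++)
 *			if (node[k] >= sector)
 *				break;
 *	}
 *	return &t->targets[(KEYS_PER_NODE * n) + k];
 */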

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned int magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int target_bio_nr;
	unsigned int *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};
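
/*
 * Because 'clone' is embedded as the last member, dm.c converts a clone
 * bio back to its owning dm_target_io with container_of(), e.g.:
 *
 *	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 */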

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned int magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

static inline void dm_io_inc_pending(struct dm_io *io)
{
	atomic_inc(&io->io_count);
}

void dm_io_dec_pending(struct dm_io *io, blk_status_t error);
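
/*
 * io_count counts the in-flight clones plus the submitting context's
 * reference; dm_io_dec_pending() completes the original bio once the
 * count drops to zero. A sketch of the pairing as used in dm.c:
 *
 *	dm_io_inc_pending(io);
 *	submit_bio_noacct(clone);
 *	...
 *	dm_io_dec_pending(io, errno_to_blk_status(error));
 */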

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
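
/*
 * __dm_get_module_param() above reads a module parameter, substituting
 * 'def' if it is unset and clamping it to 'max'; dm.c uses it roughly
 * like:
 *
 *	return __dm_get_module_param(&reserved_bio_based_ios,
 *				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
 */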

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
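
/*
 * Message handlers (dm-stats.c, for example) append to 'result' with
 * scnprintf()-style helpers and use the check above to tell the caller
 * the output was truncated; the pattern is roughly:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, ...);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		... the output did not fit in 'result' ...
 */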

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif