/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference (see the usage sketch after this struct).
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
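
/*
 * Usage sketch (illustrative only, not part of the original header): core
 * code in dm.c dereferences the live table under SRCU protection via the
 * dm_get_live_table()/dm_put_live_table() helpers declared in
 * include/linux/device-mapper.h, roughly:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect the current table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * Holding md->suspend_lock is the other option mentioned in the comment
 * on the map field above.
 */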

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;
};
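
/*
 * Illustrative only: highs[i] records the last sector covered by targets[i],
 * with entries in ascending order, so (ignoring the multi-level index used
 * to speed up large tables) locating the target for a sector boils down to:
 *
 *	for (i = 0; i < t->num_targets; i++)
 *		if (sector <= t->highs[i])
 *			return &t->targets[i];
 *
 * dm-table.c accelerates this linear scan with the depth/counts/index
 * btree above; see dm_table_find_target().
 */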

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
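
/*
 * Usage sketch (illustrative only): dm's kobject release callback can signal
 * the embedded completion so teardown code knows the sysfs object is truly
 * gone before the mapped_device that embeds the holder is freed:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 *
 * with the freeing path doing a kobject_put() followed by
 * wait_for_completion(&md->kobj_holder.completion).
 */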

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
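
/*
 * Usage sketch (illustrative only): callers clamp writable module parameters
 * to sane bounds, along the lines of dm.c's reserved-IO accessor:
 *
 *	unsigned dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *					     RESERVED_BIO_BASED_IOS,
 *					     DM_RESERVED_MAX_IOS);
 *	}
 *
 * where a zero value falls back to the default and anything above the
 * maximum is clamped to it.
 */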

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
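
/*
 * Usage sketch (illustrative only, variable names are hypothetical): message
 * handlers build their reply with DMEMIT() from include/linux/device-mapper.h
 * and can use the helper above to detect that the reply no longer fits in the
 * caller's buffer:
 *
 *	unsigned sz = 0;
 *
 *	DMEMIT("%llu", (unsigned long long)some_count);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		... report truncation to the caller ...
 */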

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif