/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_CORE_H__
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
#include "nd.h"

extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
extern int nvdimm_major;
extern struct workqueue_struct *nvdimm_wq;

struct nvdimm_bus {
	struct nvdimm_bus_descriptor *nd_desc;
	wait_queue_head_t wait;
	struct list_head list;
	struct device dev;
	int id, probe_active;
	atomic_t ioctl_active;
	struct list_head mapping_list;
	struct mutex reconfig_mutex;
	struct badrange badrange;
};
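
/*
 * Illustrative sketch (not part of this header): registered buses live
 * on nvdimm_bus_list, and walks of that list are expected to hold
 * nvdimm_bus_list_mutex, e.g.:
 *
 *	struct nvdimm_bus *nvdimm_bus;
 *
 *	mutex_lock(&nvdimm_bus_list_mutex);
 *	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list)
 *		dev_dbg(&nvdimm_bus->dev, "id: %d\n", nvdimm_bus->id);
 *	mutex_unlock(&nvdimm_bus_list_mutex);
 */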

struct nvdimm {
	unsigned long flags;
	void *provider_data;
	unsigned long cmd_mask;
	struct device dev;
	atomic_t busy;
	int id, num_flush;
	struct resource *flush_wpq;
	const char *dimm_id;
	struct {
		const struct nvdimm_security_ops *ops;
		unsigned long flags;
		unsigned long ext_flags;
		unsigned int overwrite_tmo;
		struct kernfs_node *overwrite_state;
	} sec;
	struct delayed_work dwork;
	const struct nvdimm_fw_ops *fw_ops;
};

static inline unsigned long nvdimm_security_flags(
		struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
	u64 flags;
	const u64 state_flags = 1UL << NVDIMM_SECURITY_DISABLED
		| 1UL << NVDIMM_SECURITY_LOCKED
		| 1UL << NVDIMM_SECURITY_UNLOCKED
		| 1UL << NVDIMM_SECURITY_OVERWRITE;

	if (!nvdimm->sec.ops)
		return 0;

	flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
	/* disabled, locked, unlocked, and overwrite are mutually exclusive */
	dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
			"reported invalid security state: %#llx\n",
			(unsigned long long) flags);
	return flags;
}
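/*
 * Example (illustrative sketch, not part of this header): callers treat
 * the return value as a bitmask of NVDIMM_SECURITY_* states and test
 * individual bits, e.g.:
 *
 *	unsigned long flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
 *
 *	if (test_bit(NVDIMM_SECURITY_LOCKED, &flags))
 *		return -EBUSY;
 */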
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
void nvdimm_security_overwrite_query(struct work_struct *work);
#else
static inline ssize_t nvdimm_security_store(struct device *dev,
		const char *buf, size_t len)
{
	return -EOPNOTSUPP;
}
static inline void nvdimm_security_overwrite_query(struct work_struct *work)
{
}
#endif

/**
 * struct blk_alloc_info - tracking info for BLK dpa scanning
 * @nd_mapping: blk region mapping boundaries
 * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
 * @busy: decremented in blk_dpa_busy to account for ranges already
 *	  handled by alias_dpa_busy
 * @res: alias_dpa_busy interprets this as a free space range that needs
 *	 to be truncated to the valid BLK allocation starting DPA;
 *	 blk_dpa_busy treats it as a busy range that needs the aliased
 *	 PMEM ranges truncated.
 */
struct blk_alloc_info {
	struct nd_mapping *nd_mapping;
	resource_size_t available, busy;
	struct resource *res;
};
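
/*
 * Illustrative sketch (assumed from the scan helpers declared below): a
 * BLK capacity scan seeds this structure and then walks the bus devices
 * with alias_dpa_busy(), e.g.:
 *
 *	struct blk_alloc_info info = {
 *		.nd_mapping = nd_mapping,
 *		.available = nd_mapping->size,
 *		.res = NULL,
 *	};
 *
 *	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
 */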

bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_volatile(dev);
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
void __nd_device_register(struct device *dev);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
struct nd_region;
struct nvdimm_drvdata;
struct nd_mapping;
void nd_mapping_free_labels(struct nd_mapping *nd_mapping);

int __reserve_free_pmem(struct device *dev, void *data);
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping);

resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);

#if IS_ENABLED(CONFIG_ND_CLAIM)
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
		resource_size_t size);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio, resource_size_t size)
{
	return -ENXIO;
}

static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif
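
/*
 * Example (illustrative sketch; the exact size argument varies by call
 * site): claim-device probe paths map the namespace I/O range before
 * using it, e.g.:
 *
 *	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 *	int rc = devm_nsio_enable(dev, nsio, resource_size(&nsio->res));
 *
 *	if (rc)
 *		return rc;
 */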

#ifdef CONFIG_PROVE_LOCKING
extern struct class *nd_class;

enum {
	LOCK_BUS,
	LOCK_NDCTL,
	LOCK_REGION,
	LOCK_DIMM = LOCK_REGION,
	LOCK_NAMESPACE,
	LOCK_CLAIM,
};

static inline void debug_nvdimm_lock(struct device *dev)
{
	if (is_nd_region(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
	else if (is_nvdimm(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
	else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
	else if (dev->parent && (is_nd_region(dev->parent)))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
	else if (is_nvdimm_bus(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
	else if (dev->class && dev->class == nd_class)
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
	else
		dev_WARN(dev, "unknown lock level\n");
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
	mutex_unlock(&dev->lockdep_mutex);
}

static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
	debug_nvdimm_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	debug_nvdimm_unlock(dev);
	device_unlock(dev);
}
#else
static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	device_unlock(dev);
}

static inline void debug_nvdimm_lock(struct device *dev)
{
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
}
#endif
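
/*
 * Example (illustrative sketch): sysfs attribute handlers in this
 * subsystem bracket device state changes with these wrappers, e.g.:
 *
 *	nd_device_lock(dev);
 *	rc = nvdimm_security_store(dev, buf, len);
 *	nd_device_unlock(dev);
 */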
#endif /* __ND_CORE_H__ */