// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"

#define NVDIMM_BASE_KEY	0
#define NVDIMM_NEW_KEY	1

static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");

static const char zero_key[NVDIMM_PASSPHRASE_LEN];

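/*
 * Return the decrypted passphrase payload of an encrypted key. The
 * caller must hold key->sem for read.
 */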
Dave Jiang | 4c6926a | 2018-12-06 12:40:01 -0800 | [diff] [blame] | 27 | static void *key_data(struct key *key) |
| 28 | { |
| 29 | struct encrypted_key_payload *epayload = dereference_key_locked(key); |
| 30 | |
| 31 | lockdep_assert_held_read(&key->sem); |
| 32 | |
| 33 | return epayload->decrypted_data; |
| 34 | } |
| 35 | |
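/* Release a key taken for read by the lookup helpers; NULL (zero key) is a no-op. */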
static void nvdimm_put_key(struct key *key)
{
	if (!key)
		return;

	up_read(&key->sem);
	key_put(key);
}

/*
 * Retrieve the kernel key for the DIMM, upcalling to user space if
 * necessary. Returns a key held for read; the caller must release it
 * with nvdimm_put_key() once the payload is no longer needed.
 */
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
	struct key *key = NULL;
	static const char NVDIMM_PREFIX[] = "nvdimm:";
	char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
	struct device *dev = &nvdimm->dev;

	sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
	key = request_key(&key_type_encrypted, desc, "");
	if (IS_ERR(key)) {
		if (PTR_ERR(key) == -ENOKEY)
			dev_dbg(dev, "request_key() found no key\n");
		else
			dev_dbg(dev, "request_key() upcall failed\n");
		key = NULL;
	} else {
		struct encrypted_key_payload *epayload;

		down_read(&key->sem);
		epayload = dereference_key_locked(key);
		if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
			up_read(&key->sem);
			key_put(key);
			key = NULL;
		}
	}

	return key;
}

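/*
 * Return the passphrase payload of the kernel key for this DIMM, or the
 * all-zero passphrase when no key is installed. *key is set for the
 * caller to release with nvdimm_put_key() (NULL in the zero-key case).
 */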
static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
		struct key **key)
{
	*key = nvdimm_request_key(nvdimm);
	if (!*key)
		return zero_key;

	return key_data(*key);
}

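/*
 * Resolve a key id from the caller's keyrings to an encrypted key with
 * a passphrase-sized payload, held for read. Returns NULL if the key
 * cannot be found or does not qualify.
 */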
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
		key_serial_t id, int subclass)
{
	key_ref_t keyref;
	struct key *key;
	struct encrypted_key_payload *epayload;
	struct device *dev = &nvdimm->dev;

	keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
	if (IS_ERR(keyref))
		return NULL;

	key = key_ref_to_ptr(keyref);
	if (key->type != &key_type_encrypted) {
		key_put(key);
		return NULL;
	}

	dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));

	down_read_nested(&key->sem, subclass);
	epayload = dereference_key_locked(key);
	if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
		up_read(&key->sem);
		key_put(key);
		key = NULL;
	}
	return key;
}

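/*
 * Return the passphrase payload for a user-supplied key id. An id of 0
 * selects the zero key for the base-key slot and is rejected for the
 * new-key slot.
 */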
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
		key_serial_t id, int subclass, struct key **key)
{
	*key = NULL;
	if (id == 0) {
		if (subclass == NVDIMM_BASE_KEY)
			return zero_key;
		else
			return NULL;
	}

	*key = nvdimm_lookup_user_key(nvdimm, id, subclass);
	if (!*key)
		return NULL;

	return key_data(*key);
}

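/*
 * Verify the cached key against the hardware by issuing a no-op key
 * change (same old and new key), then refresh the security flags.
 */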
static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
	struct key *key;
	int rc;
	const void *data;

	if (!nvdimm->sec.ops->change_key)
		return -EOPNOTSUPP;

	data = nvdimm_get_key_payload(nvdimm, &key);

	/*
	 * Send the same key to the hardware as new and old key to
	 * verify that the key is good.
	 */
	rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
	if (rc < 0) {
		nvdimm_put_key(key);
		return rc;
	}

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return 0;
}

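/*
 * Unlock the DIMM with the kernel key, or revalidate the key if the
 * pre-OS environment already unlocked it. Called with the bus lock held.
 */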
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	const void *data;
	int rc;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
			|| !nvdimm->sec.flags)
		return -EIO;

	/* No need to go further if security is disabled */
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return 0;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	/*
	 * If the pre-OS has unlocked the DIMM, attempt to send the key
	 * from request_key() to the hardware for verification. Failure
	 * to revalidate the key against the hardware results in a
	 * freeze of the security configuration. I.e. if the OS does not
	 * have the key, security is being managed pre-OS.
	 */
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
		if (!key_revalidate)
			return 0;

		return nvdimm_key_revalidate(nvdimm);
	} else
		data = nvdimm_get_key_payload(nvdimm, &key);

	rc = nvdimm->sec.ops->unlock(nvdimm, data);
	dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

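/* Public entry point: take the bus lock around the unlock attempt. */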
int nvdimm_security_unlock(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int rc;

	nvdimm_bus_lock(dev);
	rc = __nvdimm_security_unlock(nvdimm);
	nvdimm_bus_unlock(dev);
	return rc;
}

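/* Reject operations while security is frozen or an overwrite is running. */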
static int check_security_state(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;

	if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
		dev_dbg(dev, "Incorrect security state: %#lx\n",
				nvdimm->sec.flags);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	return 0;
}

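/* Disable the security passphrase using the user-supplied key id. */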
static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->disable(nvdimm, data);
	dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

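/*
 * Change the user or master passphrase from the current key (keyid) to
 * the new key (new_keyid), then refresh the matching security flags.
 */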
static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
		unsigned int new_keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key, *newkey;
	int rc;
	const void *data, *newdata;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
			NVDIMM_NEW_KEY, &newkey);
	if (!newdata) {
		nvdimm_put_key(key);
		return -ENOKEY;
	}

	rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
	dev_dbg(dev, "key: %d %d update%s: %s\n",
			key_serial(key), key_serial(newkey),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(newkey);
	nvdimm_put_key(key);
	if (pass_type == NVDIMM_MASTER)
		nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
				NVDIMM_MASTER);
	else
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
				NVDIMM_USER);
	return rc;
}

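/* Secure-erase the DIMM with either the user or the master passphrase. */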
static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
			&& pass_type == NVDIMM_MASTER) {
		dev_dbg(dev,
			"Attempt to secure erase in wrong master state.\n");
		return -EOPNOTSUPP;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
	dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

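/*
 * Kick off an overwrite operation and schedule the delayed work that
 * polls the hardware for completion.
 */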
static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	if (dev->driver == NULL) {
		dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
		return -EINVAL;
	}

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->overwrite(nvdimm, data);
	dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	if (rc == 0) {
		set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
		set_bit(NDD_WORK_PENDING, &nvdimm->flags);
		set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
		/*
		 * Make sure we don't lose device while doing overwrite
		 * query.
		 */
		get_device(dev);
		queue_delayed_work(system_wq, &nvdimm->dwork, 0);
	}

	return rc;
}

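/*
 * Poll the hardware for overwrite completion; reschedule with a growing
 * (capped) interval while the operation is still in progress.
 */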
void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
	int rc;
	unsigned int tmo;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	/*
	 * Abort and release device if we no longer have the overwrite
	 * flag set. It means the work has been canceled.
	 */
	if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
		return;

	tmo = nvdimm->sec.overwrite_tmo;

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
			|| !nvdimm->sec.flags)
		return;

	rc = nvdimm->sec.ops->query_overwrite(nvdimm);
	if (rc == -EBUSY) {

		/* setup delayed work again */
		tmo += 10;
		queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
		nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
		return;
	}

	if (rc < 0)
		dev_dbg(&nvdimm->dev, "overwrite failed\n");
	else
		dev_dbg(&nvdimm->dev, "overwrite completed\n");

	/*
	 * Mark the overwrite work done and update dimm security flags,
	 * then send a sysfs event notification to wake up userspace
	 * poll threads to pick up the changed state.
	 */
	nvdimm->sec.overwrite_tmo = 0;
	clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
	clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	if (nvdimm->sec.overwrite_state)
		sysfs_notify_dirent(nvdimm->sec.overwrite_state);
	put_device(&nvdimm->dev);
}

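/* Delayed-work entry point: poll overwrite status under the bus lock. */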
void nvdimm_security_overwrite_query(struct work_struct *work)
{
	struct nvdimm *nvdimm =
		container_of(work, typeof(*nvdimm), dwork.work);

	nvdimm_bus_lock(&nvdimm->dev);
	__nvdimm_security_overwrite_query(nvdimm);
	nvdimm_bus_unlock(&nvdimm->dev);
}

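/*
 * Table of sysfs security commands: C(enum id, command name, number of
 * expected tokens including the command itself).
 */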
#define OPS \
	C( OP_FREEZE, "freeze", 1), \
	C( OP_DISABLE, "disable", 2), \
	C( OP_UPDATE, "update", 3), \
	C( OP_ERASE, "erase", 2), \
	C( OP_OVERWRITE, "overwrite", 2), \
	C( OP_MASTER_UPDATE, "master_update", 3), \
	C( OP_MASTER_ERASE, "master_erase", 2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C
#define C(a, b, c) { b, c }
static struct {
	const char *name;
	int args;
} ops[] = { OPS };
#undef C

#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10

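/*
 * Parse "<command> [<keyid> [<new keyid>]]" from sysfs and dispatch to
 * the corresponding security operation.
 */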
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	ssize_t rc;
	char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
		nkeystr[KEY_ID_SIZE+1];
	unsigned int key, newkey;
	int i;

	rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s",
			cmd, keystr, nkeystr);
	if (rc < 1)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(ops); i++)
		if (sysfs_streq(cmd, ops[i].name))
			break;
	if (i >= ARRAY_SIZE(ops))
		return -EINVAL;
	if (ops[i].args > 1)
		rc = kstrtouint(keystr, 0, &key);
	if (rc >= 0 && ops[i].args > 2)
		rc = kstrtouint(nkeystr, 0, &newkey);
	if (rc < 0)
		return rc;

	if (i == OP_FREEZE) {
		dev_dbg(dev, "freeze\n");
		rc = nvdimm_security_freeze(nvdimm);
	} else if (i == OP_DISABLE) {
		dev_dbg(dev, "disable %u\n", key);
		rc = security_disable(nvdimm, key);
	} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
		dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
		rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
		dev_dbg(dev, "%s %u\n", ops[i].name, key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_erase(nvdimm, key, i == OP_ERASE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_OVERWRITE) {
		dev_dbg(dev, "overwrite %u\n", key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_overwrite(nvdimm, key);
	} else
		return -EINVAL;

	if (rc == 0)
		rc = len;
	return rc;
}