Thomas Gleixner | 5b497af | 2019-05-29 07:18:09 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 2 | /* |
| 3 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 4 | */ |
| 5 | #include <linux/device.h> |
| 6 | #include <linux/sizes.h> |
Enrico Weigelt | 5ae96d7 | 2020-12-15 17:35:31 +0100 | [diff] [blame] | 7 | #include <linux/badblocks.h> |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 8 | #include "nd-core.h" |
Dan Williams | f2b6125 | 2017-05-29 23:00:34 -0700 | [diff] [blame] | 9 | #include "pmem.h" |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 10 | #include "pfn.h" |
| 11 | #include "btt.h" |
| 12 | #include "nd.h" |
| 13 | |
| 14 | void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns) |
| 15 | { |
| 16 | struct nd_namespace_common *ndns = *_ndns; |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 17 | struct nvdimm_bus *nvdimm_bus; |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 18 | |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 19 | if (!ndns) |
| 20 | return; |
| 21 | |
| 22 | nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev); |
| 23 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); |
Dan Williams | 9cf8bd5 | 2016-12-15 20:04:31 -0800 | [diff] [blame] | 24 | dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 25 | ndns->claim = NULL; |
| 26 | *_ndns = NULL; |
| 27 | put_device(&ndns->dev); |
| 28 | } |
| 29 | |
| 30 | void nd_detach_ndns(struct device *dev, |
| 31 | struct nd_namespace_common **_ndns) |
| 32 | { |
| 33 | struct nd_namespace_common *ndns = *_ndns; |
| 34 | |
| 35 | if (!ndns) |
| 36 | return; |
| 37 | get_device(&ndns->dev); |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 38 | nvdimm_bus_lock(&ndns->dev); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 39 | __nd_detach_ndns(dev, _ndns); |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 40 | nvdimm_bus_unlock(&ndns->dev); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 41 | put_device(&ndns->dev); |
| 42 | } |
| 43 | |
| 44 | bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, |
| 45 | struct nd_namespace_common **_ndns) |
| 46 | { |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 47 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev); |
| 48 | |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 49 | if (attach->claim) |
| 50 | return false; |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 51 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); |
Dan Williams | 9cf8bd5 | 2016-12-15 20:04:31 -0800 | [diff] [blame] | 52 | dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 53 | attach->claim = dev; |
| 54 | *_ndns = attach; |
| 55 | get_device(&attach->dev); |
| 56 | return true; |
| 57 | } |
| 58 | |
| 59 | bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, |
| 60 | struct nd_namespace_common **_ndns) |
| 61 | { |
| 62 | bool claimed; |
| 63 | |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 64 | nvdimm_bus_lock(&attach->dev); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 65 | claimed = __nd_attach_ndns(dev, attach, _ndns); |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 66 | nvdimm_bus_unlock(&attach->dev); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 67 | return claimed; |
| 68 | } |
| 69 | |
/* device_find_child() callback: match a child device by exact name */
static int namespace_match(struct device *dev, void *data)
{
	const char *name = data;

	return strcmp(dev_name(dev), name) == 0;
}
| 76 | |
| 77 | static bool is_idle(struct device *dev, struct nd_namespace_common *ndns) |
| 78 | { |
| 79 | struct nd_region *nd_region = to_nd_region(dev->parent); |
| 80 | struct device *seed = NULL; |
| 81 | |
| 82 | if (is_nd_btt(dev)) |
| 83 | seed = nd_region->btt_seed; |
| 84 | else if (is_nd_pfn(dev)) |
| 85 | seed = nd_region->pfn_seed; |
Dan Williams | cd03412 | 2016-03-11 10:15:36 -0800 | [diff] [blame] | 86 | else if (is_nd_dax(dev)) |
| 87 | seed = nd_region->dax_seed; |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 88 | |
| 89 | if (seed == dev || ndns || dev->driver) |
| 90 | return false; |
| 91 | return true; |
| 92 | } |
| 93 | |
Dan Williams | 03dca34 | 2016-05-21 12:22:41 -0700 | [diff] [blame] | 94 | struct nd_pfn *to_nd_pfn_safe(struct device *dev) |
| 95 | { |
| 96 | /* |
| 97 | * pfn device attributes are re-used by dax device instances, so we |
| 98 | * need to be careful to correct device-to-nd_pfn conversion. |
| 99 | */ |
| 100 | if (is_nd_pfn(dev)) |
| 101 | return to_nd_pfn(dev); |
| 102 | |
| 103 | if (is_nd_dax(dev)) { |
| 104 | struct nd_dax *nd_dax = to_nd_dax(dev); |
| 105 | |
| 106 | return &nd_dax->nd_pfn; |
| 107 | } |
| 108 | |
| 109 | WARN_ON(1); |
| 110 | return NULL; |
| 111 | } |
| 112 | |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 113 | static void nd_detach_and_reset(struct device *dev, |
| 114 | struct nd_namespace_common **_ndns) |
| 115 | { |
| 116 | /* detach the namespace and destroy / reset the device */ |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 117 | __nd_detach_ndns(dev, _ndns); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 118 | if (is_idle(dev, *_ndns)) { |
| 119 | nd_device_unregister(dev, ND_ASYNC); |
| 120 | } else if (is_nd_btt(dev)) { |
| 121 | struct nd_btt *nd_btt = to_nd_btt(dev); |
| 122 | |
| 123 | nd_btt->lbasize = 0; |
| 124 | kfree(nd_btt->uuid); |
| 125 | nd_btt->uuid = NULL; |
Dan Williams | 03dca34 | 2016-05-21 12:22:41 -0700 | [diff] [blame] | 126 | } else if (is_nd_pfn(dev) || is_nd_dax(dev)) { |
| 127 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 128 | |
| 129 | kfree(nd_pfn->uuid); |
| 130 | nd_pfn->uuid = NULL; |
| 131 | nd_pfn->mode = PFN_MODE_NONE; |
| 132 | } |
| 133 | } |
| 134 | |
| 135 | ssize_t nd_namespace_store(struct device *dev, |
| 136 | struct nd_namespace_common **_ndns, const char *buf, |
| 137 | size_t len) |
| 138 | { |
| 139 | struct nd_namespace_common *ndns; |
| 140 | struct device *found; |
| 141 | char *name; |
| 142 | |
| 143 | if (dev->driver) { |
Dan Williams | 426824d | 2018-03-05 16:39:31 -0800 | [diff] [blame] | 144 | dev_dbg(dev, "namespace already active\n"); |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 145 | return -EBUSY; |
| 146 | } |
| 147 | |
| 148 | name = kstrndup(buf, len, GFP_KERNEL); |
| 149 | if (!name) |
| 150 | return -ENOMEM; |
| 151 | strim(name); |
| 152 | |
| 153 | if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0) |
| 154 | /* pass */; |
| 155 | else { |
| 156 | len = -EINVAL; |
| 157 | goto out; |
| 158 | } |
| 159 | |
| 160 | ndns = *_ndns; |
| 161 | if (strcmp(name, "") == 0) { |
| 162 | nd_detach_and_reset(dev, _ndns); |
| 163 | goto out; |
| 164 | } else if (ndns) { |
| 165 | dev_dbg(dev, "namespace already set to: %s\n", |
| 166 | dev_name(&ndns->dev)); |
| 167 | len = -EBUSY; |
| 168 | goto out; |
| 169 | } |
| 170 | |
| 171 | found = device_find_child(dev->parent, name, namespace_match); |
| 172 | if (!found) { |
| 173 | dev_dbg(dev, "'%s' not found under %s\n", name, |
| 174 | dev_name(dev->parent)); |
| 175 | len = -ENODEV; |
| 176 | goto out; |
| 177 | } |
| 178 | |
| 179 | ndns = to_ndns(found); |
Dan Williams | b3fde74 | 2017-06-04 10:18:39 +0900 | [diff] [blame] | 180 | |
| 181 | switch (ndns->claim_class) { |
| 182 | case NVDIMM_CCLASS_NONE: |
| 183 | break; |
| 184 | case NVDIMM_CCLASS_BTT: |
Vishal Verma | 14e4945 | 2017-06-28 14:25:00 -0600 | [diff] [blame] | 185 | case NVDIMM_CCLASS_BTT2: |
Dan Williams | b3fde74 | 2017-06-04 10:18:39 +0900 | [diff] [blame] | 186 | if (!is_nd_btt(dev)) { |
| 187 | len = -EBUSY; |
| 188 | goto out_attach; |
| 189 | } |
| 190 | break; |
| 191 | case NVDIMM_CCLASS_PFN: |
| 192 | if (!is_nd_pfn(dev)) { |
| 193 | len = -EBUSY; |
| 194 | goto out_attach; |
| 195 | } |
| 196 | break; |
| 197 | case NVDIMM_CCLASS_DAX: |
| 198 | if (!is_nd_dax(dev)) { |
| 199 | len = -EBUSY; |
| 200 | goto out_attach; |
| 201 | } |
| 202 | break; |
| 203 | default: |
| 204 | len = -EBUSY; |
| 205 | goto out_attach; |
| 206 | break; |
| 207 | } |
| 208 | |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 209 | if (__nvdimm_namespace_capacity(ndns) < SZ_16M) { |
| 210 | dev_dbg(dev, "%s too small to host\n", name); |
| 211 | len = -ENXIO; |
| 212 | goto out_attach; |
| 213 | } |
| 214 | |
| 215 | WARN_ON_ONCE(!is_nvdimm_bus_locked(dev)); |
Dan Williams | 452bae0 | 2017-04-28 22:05:14 -0700 | [diff] [blame] | 216 | if (!__nd_attach_ndns(dev, ndns, _ndns)) { |
Dan Williams | e145574 | 2015-07-30 17:57:47 -0400 | [diff] [blame] | 217 | dev_dbg(dev, "%s already claimed\n", |
| 218 | dev_name(&ndns->dev)); |
| 219 | len = -EBUSY; |
| 220 | } |
| 221 | |
| 222 | out_attach: |
| 223 | put_device(&ndns->dev); /* from device_find_child */ |
| 224 | out: |
| 225 | kfree(name); |
| 226 | return len; |
| 227 | } |
| 228 | |
| 229 | /* |
| 230 | * nd_sb_checksum: compute checksum for a generic info block |
| 231 | * |
| 232 | * Returns a fletcher64 checksum of everything in the given info block |
| 233 | * except the last field (since that's where the checksum lives). |
| 234 | */ |
| 235 | u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb) |
| 236 | { |
| 237 | u64 sum; |
| 238 | __le64 sum_save; |
| 239 | |
| 240 | BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K); |
| 241 | BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K); |
| 242 | BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K); |
| 243 | |
| 244 | sum_save = nd_gen_sb->checksum; |
| 245 | nd_gen_sb->checksum = 0; |
| 246 | sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1); |
| 247 | nd_gen_sb->checksum = sum_save; |
| 248 | return sum; |
| 249 | } |
| 250 | EXPORT_SYMBOL(nd_sb_checksum); |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 251 | |
/*
 * nsio_rw_bytes() - raw read/write against a pmem-backed namespace
 * @ndns: namespace to access
 * @offset: byte offset into the namespace
 * @buf: kernel buffer to read into / write from
 * @size: number of bytes
 * @rw: READ or WRITE
 * @flags: NVDIMM_IO_ATOMIC forbids sleeping (skips poison clearing)
 *
 * Reads fail with -EIO if the 512-byte-aligned span covering the
 * request intersects known badblocks.  Writes to a poisoned span
 * attempt to clear the poison first when the request is 512-byte
 * aligned and sleeping is allowed; the data is still written either
 * way.  Returns 0 on success or a negative errno.
 */
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw,
		unsigned long flags)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	/* widen the check to whole 512-byte sectors around the request */
	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
	sector_t sector = offset >> 9;
	int rc = 0, ret = 0;

	if (unlikely(!size))
		return 0;

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		/* never return data from a known-poisoned span */
		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
			return -EIO;
		/* machine-check-safe copy: catches latent (unlisted) poison */
		if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
			return -EIO;
		return 0;
	}

	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
		/*
		 * Poison can only be cleared in full 512-byte units, and
		 * clearing may sleep, so it is skipped for unaligned or
		 * atomic requests (the write below still proceeds).
		 */
		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
				&& !(flags & NVDIMM_IO_ATOMIC)) {
			long cleared;

			might_sleep();
			cleared = nvdimm_clear_poison(&ndns->dev,
					nsio->res.start + offset, size);
			/* partial clearing still leaves the write failed */
			if (cleared < size)
				rc = -EIO;
			if (cleared > 0 && cleared / 512) {
				cleared /= 512;
				badblocks_clear(&nsio->bb, sector, cleared);
			}
			/* drop stale cachelines over the cleared range */
			arch_invalidate_pmem(nsio->addr + offset, size);
		} else
			rc = -EIO;
	}

	/* non-temporal write, then flush the region's write-pending queues */
	memcpy_flushcache(nsio->addr + offset, buf, size);
	ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
	if (ret)
		rc = ret;

	return rc;
}
| 303 | |
Aneesh Kumar K.V | 8f4b01f | 2019-10-31 16:27:41 +0530 | [diff] [blame] | 304 | int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, |
| 305 | resource_size_t size) |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 306 | { |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 307 | struct nd_namespace_common *ndns = &nsio->common; |
Dan Williams | a4574f6 | 2020-10-13 16:50:29 -0700 | [diff] [blame] | 308 | struct range range = { |
| 309 | .start = nsio->res.start, |
| 310 | .end = nsio->res.end, |
| 311 | }; |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 312 | |
Aneesh Kumar K.V | 8f4b01f | 2019-10-31 16:27:41 +0530 | [diff] [blame] | 313 | nsio->size = size; |
Dan Williams | a4574f6 | 2020-10-13 16:50:29 -0700 | [diff] [blame] | 314 | if (!devm_request_mem_region(dev, range.start, size, |
Dan Williams | 450c663 | 2016-11-28 11:15:18 -0800 | [diff] [blame] | 315 | dev_name(&ndns->dev))) { |
Dan Williams | a4574f6 | 2020-10-13 16:50:29 -0700 | [diff] [blame] | 316 | dev_warn(dev, "could not reserve region %pR\n", &nsio->res); |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 317 | return -EBUSY; |
| 318 | } |
| 319 | |
| 320 | ndns->rw_bytes = nsio_rw_bytes; |
| 321 | if (devm_init_badblocks(dev, &nsio->bb)) |
| 322 | return -ENOMEM; |
| 323 | nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb, |
Dan Williams | a4574f6 | 2020-10-13 16:50:29 -0700 | [diff] [blame] | 324 | &range); |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 325 | |
Dan Williams | a4574f6 | 2020-10-13 16:50:29 -0700 | [diff] [blame] | 326 | nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM); |
Dan Williams | 4258895 | 2016-05-27 13:28:31 -0700 | [diff] [blame] | 327 | |
| 328 | return PTR_ERR_OR_ZERO(nsio->addr); |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 329 | } |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 330 | |
| 331 | void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio) |
| 332 | { |
| 333 | struct resource *res = &nsio->res; |
| 334 | |
| 335 | devm_memunmap(dev, nsio->addr); |
| 336 | devm_exit_badblocks(dev, &nsio->bb); |
Aneesh Kumar K.V | 8f4b01f | 2019-10-31 16:27:41 +0530 | [diff] [blame] | 337 | devm_release_mem_region(dev, res->start, nsio->size); |
Dan Williams | 200c79d | 2016-03-22 00:22:16 -0700 | [diff] [blame] | 338 | } |