/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the dimm handle and return whether this dimm's bus supports
 * get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

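/*
 * Ensure the dimm has driver data attached and that its bus implements
 * the config-data retrieval commands before touching the label area.
 */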
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

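/*
 * Cache the entire label storage area in ndd->data, reading it with
 * ND_CMD_GET_CONFIG_DATA in chunks no larger than the dimm's advertised
 * max_xfer.
 */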
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
			|| ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
		dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, ndd->nsarea.config_size, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd), &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
	kvfree(cmd);

	return rc;
}

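/*
 * Write @len bytes at @offset into the label storage area, splitting
 * the update into max_xfer-sized ND_CMD_SET_CONFIG_DATA calls; firmware
 * status for each call is returned via the u32 appended to the payload.
 */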
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (!ndd->data)
		return -ENXIO;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4 bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

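/*
 * NDD_ALIASING marks a dimm whose capacity can be claimed by both PMEM
 * and BLK access modes, in which case namespace labels are required to
 * arbitrate the aliasing.
 */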
void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

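/*
 * device_type release callback, invoked once the last reference to the
 * nvdimm device is dropped.
 */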
static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

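/*
 * Final kref_put() callback for a dimm's driver data: release all dpa
 * reservations, free the cached label data, and drop the device
 * reference held by the drvdata.
 */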
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

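/*
 * One label slot is held back as a reserve (presumably so an in-flight
 * label update always has a free slot); the "nfree - 1 > nfree" test
 * below guards against u32 underflow when no slots are free at all.
 */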
static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

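/*
 * Register a dimm device on @nvdimm_bus. A bus provider (the ACPI NFIT
 * driver, for example) calls this once per discovered dimm with the
 * command mask it implements and any write-pending-queue flush
 * resources. A minimal sketch, with my_data and my_groups standing in
 * for provider-specific values and no flush hints:
 *
 *	nvdimm = nvdimm_create(nvdimm_bus, my_data, my_groups, 0,
 *			cmd_mask, 0, NULL);
 *
 * Returns NULL if the allocation or id assignment fails.
 */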
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

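/*
 * device_for_each_child() callback: for each region backed by the same
 * dimm as info->nd_mapping, walk past that region's PMEM allocations to
 * find where BLK capacity may begin (BLK must be allocated above all
 * aliased PMEM).
 */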
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		if (resource_size(res) > max)
			max = resource_size(res);
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

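/*
 * DPA (dimm-physical-address) allocations are tracked as named
 * resources under ndd->dpa, where the resource name encodes the owning
 * label_id. The helpers below release and reserve those ranges and
 * expect the nvdimm_bus lock to be held.
 */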
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}