/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}
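
/*
 * Example (illustrative sketch, not part of the driver): a caller can
 * key off the return code, treating -ENOTTY as "label-less dimm,
 * nothing to read" and -ENXIO as "aliased dimm whose mandatory label
 * area is unreachable":
 *
 *	rc = nvdimm_check_config_data(&nvdimm->dev);
 *	if (rc && rc != -ENOTTY)
 *		return rc;
 */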

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
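
/*
 * Example (illustrative, hypothetical values): after a successful call
 * ndd->nsarea holds the label area geometry reported by the bus
 * provider, e.g. a 128KB config area transferred in 4KB chunks:
 *
 *	ndd->nsarea.config_size == 131072;
 *	ndd->nsarea.max_xfer == 4096;
 */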

int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
			|| ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
		dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, ndd->nsarea.config_size, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd), &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
	kvfree(cmd);

	return rc;
}
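
/*
 * Example (illustrative): the loop above walks the label area in
 * max_xfer-sized chunks.  With the hypothetical 128KB/4KB geometry from
 * the previous example it issues 32 ND_CMD_GET_CONFIG_DATA calls:
 *
 *	in_offset = 0,      in_length = 4096
 *	in_offset = 4096,   in_length = 4096
 *	...
 *	in_offset = 126976, in_length = 4096
 */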

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (!ndd->data)
		return -ENXIO;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4 bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}
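
/*
 * Example (illustrative sketch): label updates flush only the bytes
 * that changed.  Assuming a hypothetical slot_offset into the cached
 * area, a single label write looks roughly like:
 *
 *	memcpy(ndd->data + slot_offset, new_label, sizeof(*new_label));
 *	rc = nvdimm_set_config_data(ndd, slot_offset,
 *			ndd->data + slot_offset, sizeof(*new_label));
 */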

void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

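/*
 * Example (illustrative sketch): a bus provider that detects a locked
 * label area marks the dimm before region probing; the detection
 * routine here is hypothetical:
 *
 *	if (provider_dimm_is_locked(provider_data))
 *		nvdimm_set_locked(&nvdimm->dev);
 */
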
static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

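/*
 * Example (illustrative sketch): users that cache an ndd pointer beyond
 * the current bus-lock scope pin it with the kref helpers:
 *
 *	get_ndd(ndd);
 *	...use ndd->data / ndd->dpa...
 *	put_ndd(ndd);
 */
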
const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

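/*
 * Example (illustrative; output depends on the provider's cmd_mask):
 *
 *	# cat /sys/bus/nd/devices/nmem0/commands
 *	get_config_size get_config_data set_config_data
 */
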
static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing; userspace should
	 * quiesce probing if it wants a static answer.
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	/* nfree == 0 would underflow the decrement below, report zero */
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

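/*
 * Example (illustrative sketch): a bus provider with label support
 * registers a dimm roughly as follows; provider_data, dimm_groups and
 * dimm_flags are provider-specific placeholders:
 *
 *	unsigned long cmd_mask = 0;
 *
 *	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
 *	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
 *	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
 *	nvdimm = nvdimm_create(nvdimm_bus, provider_data, dimm_groups,
 *			dimm_flags, cmd_mask, 0, NULL);
 *	if (!nvdimm)
 *		return -ENOMEM;
 */
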
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to the free space that we
	 * are looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of a BLK region
 * @nd_region: constrain available space check to this reference region
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		if (resource_size(res) > max)
			max = resource_size(res);
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

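/*
 * Example (simplified sketch of the region-sizing caller in the region
 * code): each mapping is probed and the walk retries with the largest
 * observed overlap until it stabilizes, roughly:
 *
 *	resource_size_t max_overlap = 0, available, overlap;
 * retry:
 *	available = 0;
 *	overlap = max_overlap;
 *	for (i = 0; i < nd_region->ndr_mappings; i++) {
 *		available += nd_pmem_available_dpa(nd_region,
 *				&nd_region->mapping[i], &overlap);
 *		if (overlap > max_overlap) {
 *			max_overlap = overlap;
 *			goto retry;
 *		}
 *	}
 */
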
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

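/*
 * Example (illustrative, hypothetical uuid): a dimm carrying two dpa
 * extents named "pmem-f1e2d3c4-..." of 256MB and 128MB reports 384MB
 * allocated for that label_id, since both resources match by name.
 */
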
static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}