/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
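		/*
		 * Example with hypothetical addresses: hints at
		 * 0x10000040 and 0x10000080 fall within one 4K page, so
		 * the page is ioremapped once and the later hint reuses
		 * the earlier mapping at its own offset.
		 */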
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
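	/*
	 * A sketch of the resulting layout, assuming the
	 * ndrd_{get,set}_flush_wpq() helpers in nd-core.h: the table is
	 * dimm-major with 1 << hints_shift slots per mapping, i.e.
	 * entry = flush_wpq[(dimm << hints_shift) + hint], where the hint
	 * index is masked to (1 << hints_shift) - 1.  That masking is what
	 * lets nvdimm_flush() pass an unbounded per-cpu index below.
	 */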
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out duplicate entries so that a shared flush hint
	 * address is not written more than once per flush.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);

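/*
 * Example usage from userspace (a sketch, assuming the standard sysfs
 * path for region0):
 *
 *	echo 1 > /sys/bus/nd/devices/region0/deep_flush
 *
 * Writing "1" triggers nvdimm_flush() for the region; writing "0" is
 * rejected with -EINVAL.
 */
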
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

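/*
 * Available-capacity accounting: on a dimm, PMEM capacity can alias
 * with BLK capacity. The retry loop below restarts the scan whenever a
 * larger BLK overlap is discovered, so every mapping's available PMEM
 * ends up being computed against the worst-case overlap in the set.
 */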
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return badblocks_show(&nd_region->bb, buf, 0);
}

static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

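/*
 * The persistence domain advertised to userspace: "cpu_cache" means
 * writes are persistent once they reach the CPU cache (e.g. an
 * eADR-style platform), "memory_controller" means they are persistent
 * once accepted by the memory controller (e.g. ADR); an empty string
 * means the platform advertised neither flag.
 */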
static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_nd_pmem(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

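/*
 * Select which interleave-set cookie validates this region's labels:
 * v1.1 namespace label indexes (major 1, minor 1) use the original
 * cookie definition (cookie1), while newer label specifications use
 * the updated definition (cookie2).
 */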
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && is_nd_region(dev)) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_region(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

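/*
 * Reading mappingN yields "<dimm-device>,<start>,<size>,<position>",
 * e.g. (hypothetical values) "nmem0,0,34359738368,0".
 */
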
#define REGION_MAPPING(idx)					\
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

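/*
 * Typical usage (a sketch; the BTT and BLK I/O paths follow this
 * pattern around their per-lane resources):
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... use the lane's data window / log slot ...
 *
 *	nd_region_release_lane(nd_region, lane);
 *
 * The trailing put_cpu() in nd_region_release_lane() pairs with the
 * get_cpu() taken in nd_region_acquire_lane(); the inner get_cpu() /
 * put_cpu() only brackets the per-cpu reference count update.
 */
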
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache().  The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

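/*
 * Example caller (a sketch; the pmem block driver follows this
 * pattern): data is copied to persistent memory with
 * memcpy_flushcache(), then a REQ_PREFLUSH / REQ_FUA request is
 * completed by calling nvdimm_flush() to drain the DIMMs'
 * write-posted queues.
 */
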
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without flush hints, so
	 * assume a platform persistence mechanism like ADR.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}