// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
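
/*
 * Illustrative sketch, not part of the driver: if a dimm publishes two
 * flush hint addresses in the same 4K page, say 0x10000008 and
 * 0x10000040 (hypothetical addresses), the page-sharing check above
 * ioremaps that page once and records two offsets into one mapping:
 *
 *	ndrd_get_flush_wpq(ndrd, dimm, 0) == base + 0x008
 *	ndrd_get_flush_wpq(ndrd, dimm, 1) == base + 0x040
 *
 * ...where 'base' is the single devm_nvdimm_ioremap() result.
 */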

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out duplicate entries so a region-wide flush does not
	 * issue redundant writes to the same flush hint page.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the region's 'nstype' attribute, an input to the MODALIAS
 * for namespace devices, and the bit number an nvdimm_bus uses to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
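
/*
 * Example, illustrative only: a pmem region containing at least one
 * label-capable (NDD_ALIASING) dimm reports ND_DEVICE_NAMESPACE_PMEM,
 * while one without any reports ND_DEVICE_NAMESPACE_IO. The value
 * feeds the "nd:t<nstype>" modalias string that namespace drivers
 * match against.
 */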

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
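
/*
 * Usage sketch from userspace, assuming the standard libnvdimm sysfs
 * layout under /sys/bus/nd/devices:
 *
 *	# cat /sys/bus/nd/devices/region0/deep_flush
 *	1
 *	# echo 1 > /sys/bus/nd/devices/region0/deep_flush
 *
 * A read reports whether flush hints are present; writing "1" invokes
 * nvdimm_flush() on the region.
 */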

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
							nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}
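
/*
 * Worked example, illustrative only: in a two-dimm pmem region where
 * BLK capacity overlaps the pmem range, the first pass may discover a
 * larger 'overlap' on the second mapping than was assumed for the
 * first. The retry loop restarts the accounting with blk_max_overlap
 * raised so every mapping deducts the same worst-case BLK overlap.
 */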

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that is
	 * userspace's problem to not race itself.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_nd_pmem(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}
Dan Williams86ef58a2017-02-28 18:32:48 -0800697u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
698{
699 struct nd_interleave_set *nd_set = nd_region->nd_set;
700
701 if (nd_set)
702 return nd_set->altcookie;
703 return 0;
704}
705
Dan Williamsae8219f2016-09-19 16:04:21 -0700706void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
707{
708 struct nd_label_ent *label_ent, *e;
709
Dan Williams9cf8bd52016-12-15 20:04:31 -0800710 lockdep_assert_held(&nd_mapping->lock);
Dan Williamsae8219f2016-09-19 16:04:21 -0700711 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
712 list_del(&label_ent->list);
713 kfree(label_ent);
714 }
715}
716
Dan Williamseaf96152015-05-01 13:11:27 -0400717/*
718 * Upon successful probe/remove, take/release a reference on the
Dan Williams8c2f7e82015-06-25 04:20:04 -0400719 * associated interleave set (if present), and plant new btt + namespace
Ross Zwisler047fc8a2015-06-25 04:21:02 -0400720 * seeds. Also, on the removal of a BLK region, notify the provider to
721 * disable the region.
Dan Williamseaf96152015-05-01 13:11:27 -0400722 */
723static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
724 struct device *dev, bool probe)
725{
Dan Williams8c2f7e82015-06-25 04:20:04 -0400726 struct nd_region *nd_region;
727
Dan Williamsc9e582a2017-05-29 23:12:19 -0700728 if (!probe && is_nd_region(dev)) {
Dan Williamseaf96152015-05-01 13:11:27 -0400729 int i;
730
Dan Williams8c2f7e82015-06-25 04:20:04 -0400731 nd_region = to_nd_region(dev);
Dan Williamseaf96152015-05-01 13:11:27 -0400732 for (i = 0; i < nd_region->ndr_mappings; i++) {
733 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
Dan Williamsbf9bccc2015-06-17 17:14:46 -0400734 struct nvdimm_drvdata *ndd = nd_mapping->ndd;
Dan Williamseaf96152015-05-01 13:11:27 -0400735 struct nvdimm *nvdimm = nd_mapping->nvdimm;
736
Dan Williamsae8219f2016-09-19 16:04:21 -0700737 mutex_lock(&nd_mapping->lock);
738 nd_mapping_free_labels(nd_mapping);
739 mutex_unlock(&nd_mapping->lock);
740
Dan Williamsbf9bccc2015-06-17 17:14:46 -0400741 put_ndd(ndd);
742 nd_mapping->ndd = NULL;
Ross Zwisler047fc8a2015-06-25 04:21:02 -0400743 if (ndd)
744 atomic_dec(&nvdimm->busy);
Dan Williamseaf96152015-05-01 13:11:27 -0400745 }
Dan Williams8c2f7e82015-06-25 04:20:04 -0400746 }
Dan Williamsc9e582a2017-05-29 23:12:19 -0700747 if (dev->parent && is_nd_region(dev->parent) && probe) {
Dan Williams8c2f7e82015-06-25 04:20:04 -0400748 nd_region = to_nd_region(dev->parent);
Dan Williams1b40e092015-05-01 13:34:01 -0400749 nvdimm_bus_lock(dev);
750 if (nd_region->ns_seed == dev)
Dan Williams98a29c32016-09-30 15:28:27 -0700751 nd_region_create_ns_seed(nd_region);
Dan Williams1b40e092015-05-01 13:34:01 -0400752 nvdimm_bus_unlock(dev);
Dan Williamseaf96152015-05-01 13:11:27 -0400753 }
Dan Williams8c2f7e82015-06-25 04:20:04 -0400754 if (is_nd_btt(dev) && probe) {
Dan Williams8ca24352015-07-24 23:42:34 -0400755 struct nd_btt *nd_btt = to_nd_btt(dev);
756
Dan Williams8c2f7e82015-06-25 04:20:04 -0400757 nd_region = to_nd_region(dev->parent);
758 nvdimm_bus_lock(dev);
759 if (nd_region->btt_seed == dev)
760 nd_region_create_btt_seed(nd_region);
Dan Williams98a29c32016-09-30 15:28:27 -0700761 if (nd_region->ns_seed == &nd_btt->ndns->dev)
762 nd_region_create_ns_seed(nd_region);
Dan Williams8c2f7e82015-06-25 04:20:04 -0400763 nvdimm_bus_unlock(dev);
764 }
Dan Williams2dc43332015-12-13 11:41:36 -0800765 if (is_nd_pfn(dev) && probe) {
Dan Williams98a29c32016-09-30 15:28:27 -0700766 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
767
Dan Williams2dc43332015-12-13 11:41:36 -0800768 nd_region = to_nd_region(dev->parent);
769 nvdimm_bus_lock(dev);
770 if (nd_region->pfn_seed == dev)
771 nd_region_create_pfn_seed(nd_region);
Dan Williams98a29c32016-09-30 15:28:27 -0700772 if (nd_region->ns_seed == &nd_pfn->ndns->dev)
773 nd_region_create_ns_seed(nd_region);
Dan Williams2dc43332015-12-13 11:41:36 -0800774 nvdimm_bus_unlock(dev);
775 }
Dan Williamscd034122016-03-11 10:15:36 -0800776 if (is_nd_dax(dev) && probe) {
Dan Williams98a29c32016-09-30 15:28:27 -0700777 struct nd_dax *nd_dax = to_nd_dax(dev);
778
Dan Williamscd034122016-03-11 10:15:36 -0800779 nd_region = to_nd_region(dev->parent);
780 nvdimm_bus_lock(dev);
781 if (nd_region->dax_seed == dev)
782 nd_region_create_dax_seed(nd_region);
Dan Williams98a29c32016-09-30 15:28:27 -0700783 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
784 nd_region_create_ns_seed(nd_region);
Dan Williamscd034122016-03-11 10:15:36 -0800785 nvdimm_bus_unlock(dev);
786 }
Dan Williamseaf96152015-05-01 13:11:27 -0400787}
788
789void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
790{
791 nd_region_notify_driver_action(nvdimm_bus, dev, true);
792}
793
794void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
795{
796 nd_region_notify_driver_action(nvdimm_bus, dev, false);
797}
798
Dan Williams1f7df6f2015-06-09 20:13:14 -0400799static ssize_t mappingN(struct device *dev, char *buf, int n)
800{
801 struct nd_region *nd_region = to_nd_region(dev);
802 struct nd_mapping *nd_mapping;
803 struct nvdimm *nvdimm;
804
805 if (n >= nd_region->ndr_mappings)
806 return -ENXIO;
807 nd_mapping = &nd_region->mapping[n];
808 nvdimm = nd_mapping->nvdimm;
809
Dan Williams401c0a12017-08-04 17:20:16 -0700810 return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
811 nd_mapping->start, nd_mapping->size,
812 nd_mapping->position);
Dan Williams1f7df6f2015-06-09 20:13:14 -0400813}
814
815#define REGION_MAPPING(idx) \
816static ssize_t mapping##idx##_show(struct device *dev, \
817 struct device_attribute *attr, char *buf) \
818{ \
819 return mappingN(dev, buf, idx); \
820} \
821static DEVICE_ATTR_RO(mapping##idx)
822
/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
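
/*
 * Typical usage, a sketch mirroring the BTT and BLK I/O paths:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... perform I/O confined to per-lane resources ...
 *	nd_region_release_lane(nd_region, lane);
 *
 * Preemption is disabled from acquire to release (get_cpu() without an
 * intervening put_cpu()), so the critical section must not sleep.
 */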

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		rc = generic_nvdimm_flush(nd_region);
	else {
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}
/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache(). The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
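
/*
 * Usage sketch, mirroring the pmem driver's write path (names other
 * than nvdimm_flush() and memcpy_flushcache() are hypothetical):
 *
 *	memcpy_flushcache(pmem_addr, buf, len);
 *	nvdimm_flush(nd_region, NULL);
 *
 * A caller completing a struct bio may pass it so that a provider
 * ->flush() callback (e.g. virtio-pmem) can attach an asynchronous
 * completion to it.
 */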

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints; assume a
	 * platform persistence mechanism like ADR.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
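
/*
 * Interpretation sketch for callers, illustrative only:
 *
 *	rc = nvdimm_has_flush(nd_region);
 *
 * rc < 0: capability unknown (no mappings, or no pmem api);
 * rc == 0: rely on platform persistence such as ADR, no hint writes;
 * rc > 0: flush hints present, nvdimm_flush() does real work.
 */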

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
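
/*
 * Example, illustrative only: a provider carving a namespace out of
 * [start, start + size) can reject requests that stray into a
 * neighboring memory region:
 *
 *	if (nd_region_conflict(nd_region, start, size))
 *		return -EBUSY;
 */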

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}