// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
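
/*
 * Editor's note: the net effect (inferred from ndrd_get_flush_wpq() /
 * ndrd_set_flush_wpq() in nd.h) is a flat per-region table of ioremapped
 * flush hint addresses, indexed roughly as
 * flush_wpq[dimm * (1 << hints_shift) + hint], where hints that land in
 * the same page share a single devm_nvdimm_ioremap() mapping.
 */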

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This prevents issuing
	 * extra flushes to the same address.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
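
/*
 * Editor's note: this is expected to run from the region driver's probe
 * path (nd_region_probe()), so the devm allocation above ties the ndrd
 * lifetime to the region device being enabled.
 */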

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	memregion_free(nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is also the region's 'nstype' attribute, an input to the MODALIAS
 * for namespace devices, and the bit number for an nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
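
/*
 * Editor's example (assuming the "nd:t%d" modalias format from
 * include/uapi/linux/ndctl.h): a namespace device on this region would
 * carry MODALIAS=nd:t<nstype>, with <nstype> the value returned above.
 */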

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
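
/*
 * Editor's usage sketch for the attribute above (assuming the standard
 * /sys/bus/nd/devices/regionX sysfs layout):
 *
 *	# cat /sys/bus/nd/devices/region0/deep_flush
 *	1
 *	# echo 1 > /sys/bus/nd/devices/region0/deep_flush
 *
 * A read reports nvdimm_has_flush(); writing "1" forces a flush cycle.
 */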

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

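	/*
	 * BLK capacity may alias PMEM capacity; if a mapping reports a
	 * larger BLK overlap than previously seen, restart the
	 * accounting below with the new overlap applied to all mappings.
	 */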
 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr && !is_memory(dev))
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

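/*
 * Editor's example of the format above, with hypothetical values:
 *
 *	$ cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,268435456,4294967296,0
 *
 * i.e. "<dimm>,<dpa-start>,<size>,<position>".
 */
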
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

static const struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};

static const struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};

static const struct attribute_group *nd_region_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_region_attribute_group,
	&nd_numa_attribute_group,
	&nd_mapping_attribute_group,
	NULL,
};

static const struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * When a namespace is activated, create new seeds for the next
 * namespace, or namespace-personality, to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed == dev) {
		nd_region_create_ns_seed(nd_region);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
	}
	nvdimm_bus_unlock(dev);
}

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
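	/* balance the get_cpu() taken in nd_region_acquire_lane() */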
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

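/*
 * Editor's usage sketch, mirroring how a BTT-style caller would pair the
 * two helpers above:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... exclusive use of the lane's data window / log slot ...
 *
 *	nd_region_release_lane(nd_region, lane);
 */
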
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc,
		const struct device_type *dev_type, const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % PAGE_SIZE) {
			dev_err(&nvdimm_bus->dev,
					"%s: %s mapping%d is not %ld aligned\n",
					caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(struct_size(nd_region, mapping,
						ndr_desc->num_mappings),
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = memregion_alloc(GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

 err_percpu:
	memregion_free(nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		rc = generic_nvdimm_flush(nd_region);
	else {
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}
/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and
 * pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache(). The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
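
/*
 * Editor's note: the writeq() above targets a flush hint address, which
 * triggers the dimm's write-pending-queue (WPQ) flush per the ACPI NFIT
 * flush hint mechanism; the specific value written (1 here) is believed
 * to be irrelevant.
 */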

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
	if (is_nd_volatile(&nd_region->dev))
		return true;

	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
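
/*
 * Editor's note: a return of 0 from the child walk above means the
 * [start, start + size) range does not intersect any sibling memory
 * region; -EBUSY means it does.
 */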