blob: 235f2089fab22ddbf537909778f55af18af0a321 [file] [log] [blame]
Dan Williams4a826c82015-06-09 16:09:36 -04001/*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/device.h>
14#include <linux/ndctl.h>
Dan Williamsb3fde742017-06-04 10:18:39 +090015#include <linux/uuid.h>
Dan Williamsf524bf22015-05-30 12:36:02 -040016#include <linux/slab.h>
Dan Williams4a826c82015-06-09 16:09:36 -040017#include <linux/io.h>
18#include <linux/nd.h>
19#include "nd-core.h"
20#include "label.h"
21#include "nd.h"
22
/*
 * GUIDs identifying the claim class (BTT/PFN/DAX) recorded in a label's
 * abstraction_guid field.  NOTE(review): they are declared zero here and
 * presumably parsed/populated by an init routine outside this chunk —
 * confirm before relying on them being non-null.
 */
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;
26
Dan Williams4a826c82015-06-09 16:09:36 -040027static u32 best_seq(u32 a, u32 b)
28{
29 a &= NSINDEX_SEQ_MASK;
30 b &= NSINDEX_SEQ_MASK;
31
32 if (a == 0 || a == b)
33 return b;
34 else if (b == 0)
35 return a;
36 else if (nd_inc_seq(a) == b)
37 return b;
38 else
39 return a;
40}
41
/* Per-label size in bytes as discovered/probed by nd_label_validate(). */
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}
46
Dan Williams4a826c82015-06-09 16:09:36 -040047size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
48{
49 u32 index_span;
50
51 if (ndd->nsindex_size)
52 return ndd->nsindex_size;
53
54 /*
55 * The minimum index space is 512 bytes, with that amount of
56 * index we can describe ~1400 labels which is less than a byte
57 * of overhead per label. Round up to a byte of overhead per
58 * label and determine the size of the index region. Yes, this
59 * starts to waste space at larger config_sizes, but it's
60 * unlikely we'll ever see anything but 128K.
61 */
Dan Williams564e8712017-06-03 18:30:43 +090062 index_span = ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
Dan Williams4a826c82015-06-09 16:09:36 -040063 index_span /= NSINDEX_ALIGN * 2;
64 ndd->nsindex_size = index_span * NSINDEX_ALIGN;
65
66 return ndd->nsindex_size;
67}
68
/*
 * Total number of label slots the config area can hold, budgeting one
 * byte of index overhead per label (see sizeof_namespace_index()).
 */
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
}
73
/*
 * Validate the two on-media namespace index blocks against the label
 * size currently assumed in @ndd (ndd->nslabel_size).  Returns the
 * index (0 or 1) of the best valid index block, or -1 when neither
 * validates.
 */
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		/* signature must match before trusting anything else */
		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "%s: nsindex%d signature invalid\n",
					__func__, i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		/* index must agree with the label size we are probing */
		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n",
					__func__, i, nsindex[i]->labelsize);
			continue;
		}

		/*
		 * Fletcher64 is defined over the index with its checksum
		 * field zeroed; restore the saved value afterwards.
		 */
		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "%s: nsindex%d checksum invalid\n",
					__func__, i);
			continue;
		}

		/* sequence number zero means the index was never written */
		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n",
					__func__, i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n",
					__func__, i, size);
			continue;
		}

		/* the advertised slot count must fit in the config area */
		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
					__func__, i, nslot,
					ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
217
Dan Williams564e8712017-06-03 18:30:43 +0900218int nd_label_validate(struct nvdimm_drvdata *ndd)
219{
220 /*
221 * In order to probe for and validate namespace index blocks we
222 * need to know the size of the labels, and we can't trust the
223 * size of the labels until we validate the index blocks.
224 * Resolve this dependency loop by probing for known label
Dan Williams8990cdf2017-06-07 10:19:46 -0700225 * sizes, but default to v1.2 256-byte namespace labels if
226 * discovery fails.
Dan Williams564e8712017-06-03 18:30:43 +0900227 */
Dan Williams8990cdf2017-06-07 10:19:46 -0700228 int label_size[] = { 128, 256 };
Dan Williams564e8712017-06-03 18:30:43 +0900229 int i, rc;
230
231 for (i = 0; i < ARRAY_SIZE(label_size); i++) {
232 ndd->nslabel_size = label_size[i];
233 rc = __nd_label_validate(ndd);
234 if (rc >= 0)
235 return rc;
236 }
237
238 return -1;
239}
240
/*
 * Copy one namespace index block over another; a no-op when either
 * side of the copy is missing.
 */
void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}
251
/* First label slot: labels start right after the two index blocks. */
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *cfg_start = to_namespace_index(ndd, 0);

	return cfg_start + 2 * sizeof_namespace_index(ndd);
}
258
/* Convert a label pointer back into its slot number in the label area. */
static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long offset = (unsigned long) nd_label
		- (unsigned long) nd_label_base(ndd);

	return offset / sizeof_namespace_label(ndd);
}
269
/* Convert a slot number into a pointer to its label in the label area. */
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long addr = (unsigned long) nd_label_base(ndd)
		+ sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) addr;
}
279
Dan Williams4a826c82015-06-09 16:09:36 -0400280#define for_each_clear_bit_le(bit, addr, size) \
281 for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
282 (bit) < (size); \
283 (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
284
285/**
Dan Williamsf524bf22015-05-30 12:36:02 -0400286 * preamble_index - common variable initialization for nd_label_* routines
Dan Williams4a826c82015-06-09 16:09:36 -0400287 * @ndd: dimm container for the relevant label set
Dan Williamsf524bf22015-05-30 12:36:02 -0400288 * @idx: namespace_index index
Dan Williams4a826c82015-06-09 16:09:36 -0400289 * @nsindex_out: on return set to the currently active namespace index
290 * @free: on return set to the free label bitmap in the index
291 * @nslot: on return set to the number of slots in the label space
292 */
Dan Williamsf524bf22015-05-30 12:36:02 -0400293static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
Dan Williams4a826c82015-06-09 16:09:36 -0400294 struct nd_namespace_index **nsindex_out,
295 unsigned long **free, u32 *nslot)
296{
297 struct nd_namespace_index *nsindex;
298
Dan Williamsf524bf22015-05-30 12:36:02 -0400299 nsindex = to_namespace_index(ndd, idx);
Dan Williams4a826c82015-06-09 16:09:36 -0400300 if (nsindex == NULL)
301 return false;
302
303 *free = (unsigned long *) nsindex->free;
304 *nslot = __le32_to_cpu(nsindex->nslot);
305 *nsindex_out = nsindex;
306
307 return true;
308}
309
Dan Williamsbf9bccc2015-06-17 17:14:46 -0400310char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
Dan Williams4a826c82015-06-09 16:09:36 -0400311{
312 if (!label_id || !uuid)
313 return NULL;
314 snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
315 flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
316 return label_id->id;
317}
318
/* preamble_index() for the currently active index block */
static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}
326
/* preamble_index() for the staging ('next to write') index block */
static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}
334
/*
 * slot_valid - is the label stored at @slot coherent?
 * Checks the label's recorded slot number, the 4K alignment of its
 * dpa/rawsize, and - for label formats that carry one - its fletcher64
 * checksum.
 */
static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		/*
		 * fletcher64 is defined over the label with its checksum
		 * field zeroed; temporarily clear it, then restore the
		 * saved value.
		 */
		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
				__func__, slot, sum);
			return false;
		}
	}

	return true;
}
364
/*
 * nd_label_reserve_dpa - reserve the dpa range of every valid active
 * label so the region allocator will not hand out space already
 * claimed on media.  Returns 0 on success, -EBUSY when a reservation
 * cannot be made.
 */
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		/* skip slots whose contents do not validate */
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
Dan Williamsbf9bccc2015-06-17 17:14:46 -0400400
/*
 * nd_label_active_count - count valid labels in the current index;
 * slots that fail validation are logged via dev_dbg and skipped.
 */
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
					__func__, slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}
430
431struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
432{
433 struct nd_namespace_index *nsindex;
434 unsigned long *free;
435 u32 nslot, slot;
436
437 if (!preamble_current(ndd, &nsindex, &free, &nslot))
438 return NULL;
439
440 for_each_clear_bit_le(slot, free, nslot) {
441 struct nd_namespace_label *nd_label;
442
Dan Williams564e8712017-06-03 18:30:43 +0900443 nd_label = to_label(ndd, slot);
Dan Williams355d8382017-06-06 14:56:43 -0700444 if (!slot_valid(ndd, nd_label, slot))
Dan Williamsbf9bccc2015-06-17 17:14:46 -0400445 continue;
446
447 if (n-- == 0)
Dan Williams564e8712017-06-03 18:30:43 +0900448 return to_label(ndd, slot);
Dan Williamsbf9bccc2015-06-17 17:14:46 -0400449 }
450
451 return NULL;
452}
Dan Williamsf524bf22015-05-30 12:36:02 -0400453
Dan Williams0ba1c632015-05-30 12:35:36 -0400454u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
Dan Williamsf524bf22015-05-30 12:36:02 -0400455{
456 struct nd_namespace_index *nsindex;
457 unsigned long *free;
458 u32 nslot, slot;
459
460 if (!preamble_next(ndd, &nsindex, &free, &nslot))
461 return UINT_MAX;
462
463 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
464
465 slot = find_next_bit_le(free, nslot, 0);
466 if (slot == nslot)
467 return UINT_MAX;
468
469 clear_bit_le(slot, free);
470
471 return slot;
472}
473
Dan Williams0ba1c632015-05-30 12:35:36 -0400474bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
Dan Williamsf524bf22015-05-30 12:36:02 -0400475{
476 struct nd_namespace_index *nsindex;
477 unsigned long *free;
478 u32 nslot;
479
480 if (!preamble_next(ndd, &nsindex, &free, &nslot))
481 return false;
482
483 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
484
485 if (slot < nslot)
486 return !test_and_set_bit_le(slot, free);
487 return false;
488}
489
/*
 * Number of free label slots in the staging index, or the full slot
 * count when no valid index exists yet (fresh label area).
 */
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}
503
/*
 * nd_label_write_index - populate, checksum, and write out index block
 * @index with sequence number @seq, then (unless ND_NSINDEX_INIT)
 * promote it to be the current index and advance ns_current/ns_next.
 * Returns 0 on success or a negative errno from the config-data write.
 */
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	/* on first init compute the slot count; otherwise keep on-media value */
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	/* zero flags plus the two bytes that follow; labelsize set next */
	memset(&nsindex->flags, 0, 3);
	/*
	 * labelsize encoding: readers compute 1 << (7 + labelsize), so
	 * size >> 8 yields 0 for 128-byte and 1 for 256-byte labels.
	 */
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	/* 128-byte labels imply index v1.1, larger labels v1.2 */
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		/* mark every slot free, then clear the padding tail bits */
		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	/* fletcher64 is computed with the checksum field zeroed */
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
569
/* Byte offset of @nd_label from the start of the label config area. */
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long cfg_start = (unsigned long) to_namespace_index(ndd, 0);

	return (unsigned long) nd_label - cfg_start;
}
576
Dan Williamsb3fde742017-06-04 10:18:39 +0900577enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
578{
579 if (guid_equal(guid, &nvdimm_btt_guid))
580 return NVDIMM_CCLASS_BTT;
581 else if (guid_equal(guid, &nvdimm_pfn_guid))
582 return NVDIMM_CCLASS_PFN;
583 else if (guid_equal(guid, &nvdimm_dax_guid))
584 return NVDIMM_CCLASS_DAX;
585 else if (guid_equal(guid, &guid_null))
586 return NVDIMM_CCLASS_NONE;
587
588 return NVDIMM_CCLASS_UNKNOWN;
589}
590
591static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
592 guid_t *target)
593{
594 if (claim_class == NVDIMM_CCLASS_BTT)
595 return &nvdimm_btt_guid;
596 else if (claim_class == NVDIMM_CCLASS_PFN)
597 return &nvdimm_pfn_guid;
598 else if (claim_class == NVDIMM_CCLASS_DAX)
599 return &nvdimm_dax_guid;
600 else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
601 /*
602 * If we're modifying a namespace for which we don't
603 * know the claim_class, don't touch the existing guid.
604 */
605 return target;
606 } else
607 return &guid_null;
608}
609
/*
 * __pmem_label_update - write an updated pmem namespace label for
 * @nspm (at interleave-set position @pos) into a freshly allocated
 * slot of the staging index, garbage collect the label it replaces,
 * and commit the new index.  Returns 0 on success or negative errno.
 */
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *victim = NULL;
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	/* locate this namespace's dpa resource on this dimm by name */
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	/*
	 * NOTE(review): the label is written with UPDATING set; clearing
	 * it presumably happens in a later pass not visible here — confirm.
	 */
	nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	/* v1.2-only fields are written when the label layout has them */
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		/* fletcher64 is computed with the checksum field zeroed */
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) != 0)
			continue;
		victim = label_ent;
		list_move_tail(&victim->list, &nd_mapping->labels);
		break;
	}
	if (victim) {
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		slot = to_slot(ndd, victim->label);
		nd_label_free_slot(ndd, slot);
		victim->label = NULL;
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		/* record the new label in the first empty tracking entry */
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
721
722static bool is_old_resource(struct resource *res, struct resource **list, int n)
Dan Williamsf524bf22015-05-30 12:36:02 -0400723{
724 int i;
Dan Williams0ba1c632015-05-30 12:35:36 -0400725
726 if (res->flags & DPA_RESOURCE_ADJUSTED)
727 return false;
728 for (i = 0; i < n; i++)
729 if (res == list[i])
730 return true;
731 return false;
732}
733
734static struct resource *to_resource(struct nvdimm_drvdata *ndd,
735 struct nd_namespace_label *nd_label)
736{
737 struct resource *res;
738
739 for_each_dpa_resource(ndd, res) {
740 if (res->start != __le64_to_cpu(nd_label->dpa))
741 continue;
742 if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
743 continue;
744 return res;
745 }
746
747 return NULL;
748}
749
750/*
751 * 1/ Account all the labels that can be freed after this update
752 * 2/ Allocate and write the label to the staging (next) index
753 * 3/ Record the resources in the namespace device
754 */
755static int __blk_label_update(struct nd_region *nd_region,
756 struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
757 int num_labels)
758{
Dan Williamsae8219f2016-09-19 16:04:21 -0700759 int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
Dan Williamsfaec6f82017-06-06 11:10:51 -0700760 struct nd_interleave_set *nd_set = nd_region->nd_set;
Dan Williamsb3fde742017-06-04 10:18:39 +0900761 struct nd_namespace_common *ndns = &nsblk->common;
Dan Williamsf524bf22015-05-30 12:36:02 -0400762 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
Dan Williams0ba1c632015-05-30 12:35:36 -0400763 struct nd_namespace_label *nd_label;
Dan Williamsae8219f2016-09-19 16:04:21 -0700764 struct nd_label_ent *label_ent, *e;
Dan Williams0ba1c632015-05-30 12:35:36 -0400765 struct nd_namespace_index *nsindex;
766 unsigned long *free, *victim_map = NULL;
767 struct resource *res, **old_res_list;
768 struct nd_label_id label_id;
769 u8 uuid[NSLABEL_UUID_LEN];
Dan Williams3934d842017-06-06 14:59:04 -0700770 int min_dpa_idx = 0;
Dan Williamsae8219f2016-09-19 16:04:21 -0700771 LIST_HEAD(list);
Dan Williams0ba1c632015-05-30 12:35:36 -0400772 u32 nslot, slot;
Dan Williamsf524bf22015-05-30 12:36:02 -0400773
Dan Williams0ba1c632015-05-30 12:35:36 -0400774 if (!preamble_next(ndd, &nsindex, &free, &nslot))
775 return -ENXIO;
Dan Williamsf524bf22015-05-30 12:36:02 -0400776
Dan Williams0ba1c632015-05-30 12:35:36 -0400777 old_res_list = nsblk->res;
778 nfree = nd_label_nfree(ndd);
779 old_num_resources = nsblk->num_resources;
780 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
781
782 /*
783 * We need to loop over the old resources a few times, which seems a
784 * bit inefficient, but we need to know that we have the label
785 * space before we start mutating the tracking structures.
786 * Otherwise the recovery method of last resort for userspace is
787 * disable and re-enable the parent region.
788 */
789 alloc = 0;
790 for_each_dpa_resource(ndd, res) {
791 if (strcmp(res->name, label_id.id) != 0)
792 continue;
793 if (!is_old_resource(res, old_res_list, old_num_resources))
794 alloc++;
795 }
796
797 victims = 0;
798 if (old_num_resources) {
799 /* convert old local-label-map to dimm-slot victim-map */
800 victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
801 GFP_KERNEL);
802 if (!victim_map)
803 return -ENOMEM;
804
805 /* mark unused labels for garbage collection */
806 for_each_clear_bit_le(slot, free, nslot) {
Dan Williams564e8712017-06-03 18:30:43 +0900807 nd_label = to_label(ndd, slot);
Dan Williams0ba1c632015-05-30 12:35:36 -0400808 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
809 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
810 continue;
811 res = to_resource(ndd, nd_label);
812 if (res && is_old_resource(res, old_res_list,
813 old_num_resources))
814 continue;
815 slot = to_slot(ndd, nd_label);
816 set_bit(slot, victim_map);
817 victims++;
818 }
819 }
820
821 /* don't allow updates that consume the last label */
822 if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
Dan Williams8c2f7e82015-06-25 04:20:04 -0400823 dev_info(&nsblk->common.dev, "insufficient label space\n");
Dan Williams0ba1c632015-05-30 12:35:36 -0400824 kfree(victim_map);
825 return -ENOSPC;
826 }
827 /* from here on we need to abort on error */
828
829
830 /* assign all resources to the namespace before writing the labels */
831 nsblk->res = NULL;
832 nsblk->num_resources = 0;
833 for_each_dpa_resource(ndd, res) {
834 if (strcmp(res->name, label_id.id) != 0)
835 continue;
836 if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
837 rc = -ENOMEM;
838 goto abort;
839 }
840 }
841
Dan Williams3934d842017-06-06 14:59:04 -0700842 /*
843 * Find the resource associated with the first label in the set
844 * per the v1.2 namespace specification.
845 */
846 for (i = 0; i < nsblk->num_resources; i++) {
847 struct resource *min = nsblk->res[min_dpa_idx];
848
849 res = nsblk->res[i];
850 if (res->start < min->start)
851 min_dpa_idx = i;
852 }
853
Dan Williams0ba1c632015-05-30 12:35:36 -0400854 for (i = 0; i < nsblk->num_resources; i++) {
855 size_t offset;
856
857 res = nsblk->res[i];
858 if (is_old_resource(res, old_res_list, old_num_resources))
859 continue; /* carry-over */
860 slot = nd_label_alloc_slot(ndd);
861 if (slot == UINT_MAX)
862 goto abort;
863 dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);
864
Dan Williams564e8712017-06-03 18:30:43 +0900865 nd_label = to_label(ndd, slot);
866 memset(nd_label, 0, sizeof_namespace_label(ndd));
Dan Williams0ba1c632015-05-30 12:35:36 -0400867 memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
868 if (nsblk->alt_name)
869 memcpy(nd_label->name, nsblk->alt_name,
870 NSLABEL_NAME_LEN);
871 nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
Dan Williams8f2bc242017-06-06 11:39:30 -0700872
873 /*
874 * Use the presence of the type_guid as a flag to
Dan Williams3934d842017-06-06 14:59:04 -0700875 * determine isetcookie usage and nlabel + position
876 * policy for blk-aperture namespaces.
Dan Williams8f2bc242017-06-06 11:39:30 -0700877 */
Dan Williams3934d842017-06-06 14:59:04 -0700878 if (namespace_label_has(ndd, type_guid)) {
879 if (i == min_dpa_idx) {
880 nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
881 nd_label->position = __cpu_to_le16(0);
882 } else {
883 nd_label->nlabel = __cpu_to_le16(0xffff);
884 nd_label->position = __cpu_to_le16(0xffff);
885 }
Dan Williams8f2bc242017-06-06 11:39:30 -0700886 nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
Dan Williams3934d842017-06-06 14:59:04 -0700887 } else {
888 nd_label->nlabel = __cpu_to_le16(0); /* N/A */
889 nd_label->position = __cpu_to_le16(0); /* N/A */
Dan Williams8f2bc242017-06-06 11:39:30 -0700890 nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
Dan Williams3934d842017-06-06 14:59:04 -0700891 }
Dan Williams8f2bc242017-06-06 11:39:30 -0700892
Dan Williams0ba1c632015-05-30 12:35:36 -0400893 nd_label->dpa = __cpu_to_le64(res->start);
894 nd_label->rawsize = __cpu_to_le64(resource_size(res));
895 nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
896 nd_label->slot = __cpu_to_le32(slot);
Dan Williamsfaec6f82017-06-06 11:10:51 -0700897 if (namespace_label_has(ndd, type_guid))
898 guid_copy(&nd_label->type_guid, &nd_set->type_guid);
Dan Williamsb3fde742017-06-04 10:18:39 +0900899 if (namespace_label_has(ndd, abstraction_guid))
900 guid_copy(&nd_label->abstraction_guid,
901 to_abstraction_guid(ndns->claim_class,
902 &nd_label->abstraction_guid));
903
Dan Williams355d8382017-06-06 14:56:43 -0700904 if (namespace_label_has(ndd, checksum)) {
905 u64 sum;
906
907 nd_label->checksum = __cpu_to_le64(0);
908 sum = nd_fletcher64(nd_label,
909 sizeof_namespace_label(ndd), 1);
910 nd_label->checksum = __cpu_to_le64(sum);
911 }
Dan Williams0ba1c632015-05-30 12:35:36 -0400912
913 /* update label */
914 offset = nd_label_offset(ndd, nd_label);
915 rc = nvdimm_set_config_data(ndd, offset, nd_label,
Dan Williams564e8712017-06-03 18:30:43 +0900916 sizeof_namespace_label(ndd));
Dan Williams0ba1c632015-05-30 12:35:36 -0400917 if (rc < 0)
918 goto abort;
919 }
920
921 /* free up now unused slots in the new index */
922 for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
923 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
924 nd_label_free_slot(ndd, slot);
925 }
926
927 /* update index */
928 rc = nd_label_write_index(ndd, ndd->ns_next,
929 nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
930 if (rc)
931 goto abort;
932
933 /*
934 * Now that the on-dimm labels are up to date, fix up the tracking
935 * entries in nd_mapping->labels
936 */
937 nlabel = 0;
Dan Williamsae8219f2016-09-19 16:04:21 -0700938 mutex_lock(&nd_mapping->lock);
939 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
940 nd_label = label_ent->label;
941 if (!nd_label)
942 continue;
Dan Williams0ba1c632015-05-30 12:35:36 -0400943 nlabel++;
944 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
945 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
946 continue;
947 nlabel--;
Dan Williamsae8219f2016-09-19 16:04:21 -0700948 list_move(&label_ent->list, &list);
949 label_ent->label = NULL;
Dan Williams0ba1c632015-05-30 12:35:36 -0400950 }
Dan Williamsae8219f2016-09-19 16:04:21 -0700951 list_splice_tail_init(&list, &nd_mapping->labels);
952 mutex_unlock(&nd_mapping->lock);
953
Dan Williams0ba1c632015-05-30 12:35:36 -0400954 if (nlabel + nsblk->num_resources > num_labels) {
955 /*
956 * Bug, we can't end up with more resources than
957 * available labels
958 */
959 WARN_ON_ONCE(1);
960 rc = -ENXIO;
961 goto out;
962 }
963
Dan Williamsae8219f2016-09-19 16:04:21 -0700964 mutex_lock(&nd_mapping->lock);
965 label_ent = list_first_entry_or_null(&nd_mapping->labels,
966 typeof(*label_ent), list);
967 if (!label_ent) {
968 WARN_ON(1);
969 mutex_unlock(&nd_mapping->lock);
970 rc = -ENXIO;
971 goto out;
972 }
Dan Williams0ba1c632015-05-30 12:35:36 -0400973 for_each_clear_bit_le(slot, free, nslot) {
Dan Williams564e8712017-06-03 18:30:43 +0900974 nd_label = to_label(ndd, slot);
Dan Williams0ba1c632015-05-30 12:35:36 -0400975 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
976 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
977 continue;
978 res = to_resource(ndd, nd_label);
979 res->flags &= ~DPA_RESOURCE_ADJUSTED;
Dan Williamsae8219f2016-09-19 16:04:21 -0700980 dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
981 list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
982 if (label_ent->label)
983 continue;
984 label_ent->label = nd_label;
985 nd_label = NULL;
986 break;
987 }
988 if (nd_label)
989 dev_WARN(&nsblk->common.dev,
990 "failed to track label slot%d\n", slot);
Dan Williams0ba1c632015-05-30 12:35:36 -0400991 }
Dan Williamsae8219f2016-09-19 16:04:21 -0700992 mutex_unlock(&nd_mapping->lock);
Dan Williams0ba1c632015-05-30 12:35:36 -0400993
994 out:
995 kfree(old_res_list);
996 kfree(victim_map);
997 return rc;
998
999 abort:
1000 /*
1001 * 1/ repair the allocated label bitmap in the index
1002 * 2/ restore the resource list
1003 */
1004 nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
1005 kfree(nsblk->res);
1006 nsblk->res = old_res_list;
1007 nsblk->num_resources = old_num_resources;
1008 old_res_list = NULL;
1009 goto out;
1010}
1011
1012static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
1013{
Dan Williamsae8219f2016-09-19 16:04:21 -07001014 int i, old_num_labels = 0;
1015 struct nd_label_ent *label_ent;
Dan Williams0ba1c632015-05-30 12:35:36 -04001016 struct nd_namespace_index *nsindex;
Dan Williams0ba1c632015-05-30 12:35:36 -04001017 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
Dan Williams0ba1c632015-05-30 12:35:36 -04001018
Dan Williamsae8219f2016-09-19 16:04:21 -07001019 mutex_lock(&nd_mapping->lock);
1020 list_for_each_entry(label_ent, &nd_mapping->labels, list)
Dan Williams0ba1c632015-05-30 12:35:36 -04001021 old_num_labels++;
Dan Williamsae8219f2016-09-19 16:04:21 -07001022 mutex_unlock(&nd_mapping->lock);
Dan Williams0ba1c632015-05-30 12:35:36 -04001023
1024 /*
1025 * We need to preserve all the old labels for the mapping so
1026 * they can be garbage collected after writing the new labels.
1027 */
Dan Williamsae8219f2016-09-19 16:04:21 -07001028 for (i = old_num_labels; i < num_labels; i++) {
1029 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
1030 if (!label_ent)
Dan Williams0ba1c632015-05-30 12:35:36 -04001031 return -ENOMEM;
Dan Williamsae8219f2016-09-19 16:04:21 -07001032 mutex_lock(&nd_mapping->lock);
1033 list_add_tail(&label_ent->list, &nd_mapping->labels);
1034 mutex_unlock(&nd_mapping->lock);
Dan Williams0ba1c632015-05-30 12:35:36 -04001035 }
Dan Williams0ba1c632015-05-30 12:35:36 -04001036
Dan Williamsf524bf22015-05-30 12:36:02 -04001037 if (ndd->ns_current == -1 || ndd->ns_next == -1)
1038 /* pass */;
1039 else
Dan Williams0ba1c632015-05-30 12:35:36 -04001040 return max(num_labels, old_num_labels);
Dan Williamsf524bf22015-05-30 12:36:02 -04001041
1042 nsindex = to_namespace_index(ndd, 0);
1043 memset(nsindex, 0, ndd->nsarea.config_size);
1044 for (i = 0; i < 2; i++) {
1045 int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
1046
1047 if (rc)
1048 return rc;
1049 }
1050 ndd->ns_next = 1;
1051 ndd->ns_current = 0;
1052
Dan Williams0ba1c632015-05-30 12:35:36 -04001053 return max(num_labels, old_num_labels);
Dan Williamsf524bf22015-05-30 12:36:02 -04001054}
1055
/*
 * del_labels() - retire all labels for @uuid in this mapping
 * @nd_mapping: dimm mapping whose labels are being deleted
 * @uuid: namespace uuid selecting which labels to drop (NULL == no-op)
 *
 * Frees the on-dimm label slots whose uuid matches, detaches the
 * corresponding tracking entries, and commits the change by writing the
 * next index block with an incremented sequence number.
 *
 * Returns 0 on success / nothing-to-do, or the error from the index
 * write.
 */
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		/* count labels that survive; uuid matches are un-counted below */
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		/* release the on-dimm slot and park the entry on a local list */
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	/* return the now-empty tracking entries to the mapping's list */
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		/* no labels left at all: drop the tracking entries entirely */
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
	}
	mutex_unlock(&nd_mapping->lock);

	/* commit: publish the updated free-slot bitmap via the next index */
	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
1102
1103int nd_pmem_namespace_label_update(struct nd_region *nd_region,
1104 struct nd_namespace_pmem *nspm, resource_size_t size)
1105{
1106 int i;
1107
1108 for (i = 0; i < nd_region->ndr_mappings; i++) {
1109 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
Dan Williams16660ea2016-10-05 21:13:23 -07001110 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1111 struct resource *res;
1112 int rc, count = 0;
Dan Williamsf524bf22015-05-30 12:36:02 -04001113
1114 if (size == 0) {
1115 rc = del_labels(nd_mapping, nspm->uuid);
1116 if (rc)
1117 return rc;
1118 continue;
1119 }
1120
Dan Williams16660ea2016-10-05 21:13:23 -07001121 for_each_dpa_resource(ndd, res)
Nicolas Iooss2d9a0272016-10-29 13:28:52 +02001122 if (strncmp(res->name, "pmem", 4) == 0)
Dan Williams16660ea2016-10-05 21:13:23 -07001123 count++;
1124 WARN_ON_ONCE(!count);
1125
1126 rc = init_labels(nd_mapping, count);
Dan Williams0ba1c632015-05-30 12:35:36 -04001127 if (rc < 0)
Dan Williamsf524bf22015-05-30 12:36:02 -04001128 return rc;
1129
1130 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
1131 if (rc)
1132 return rc;
1133 }
1134
1135 return 0;
1136}
Dan Williams0ba1c632015-05-30 12:35:36 -04001137
1138int nd_blk_namespace_label_update(struct nd_region *nd_region,
1139 struct nd_namespace_blk *nsblk, resource_size_t size)
1140{
1141 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1142 struct resource *res;
1143 int count = 0;
1144
1145 if (size == 0)
1146 return del_labels(nd_mapping, nsblk->uuid);
1147
1148 for_each_dpa_resource(to_ndd(nd_mapping), res)
1149 count++;
1150
1151 count = init_labels(nd_mapping, count);
1152 if (count < 0)
1153 return count;
1154
1155 return __blk_label_update(nd_region, nd_mapping, nsblk, count);
1156}
Dan Williamsb3fde742017-06-04 10:18:39 +09001157
/*
 * Parse the well-known BTT/PFN/DAX GUID string constants into binary
 * form once at init, so later label updates can copy/compare them
 * directly.  guid_parse() can only fail on a malformed constant, so a
 * failure here is a build-time bug -- hence WARN_ON() rather than error
 * propagation.
 */
int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}