// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/drmem.h>

static int n_root_addr_cells, n_root_size_cells;

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;
static bool in_drmem_update;

u64 drmem_lmb_memory_max(void)
{
	struct drmem_lmb *last_lmb;

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
	/*
	 * Return the value of the lmb flags field minus the reserved
	 * bit used internally for hotplug processing.
	 */
	return lmb->flags & ~DRMEM_LMB_RESERVED;
}

static struct property *clone_property(struct property *prop, u32 prop_sz)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
		return NULL;
	}

	new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
	of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
	return new_prop;
}

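/*
 * The ibm,dynamic-memory (v1) property starts with a cell holding the
 * number of LMBs, followed by one of_drconf_cell_v1 entry per LMB
 * carrying its base address, DRC index, associativity index and flags
 * (see read_drconf_v1_cell() below).  drmem_update_dt_v1() rebuilds the
 * property in that layout from the in-kernel LMB array.
 */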
static int drmem_update_dt_v1(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v1 *dr_cell;
	struct drmem_lmb *lmb;
	u32 *p;

	new_prop = clone_property(prop, prop->length);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(drmem_info->n_lmbs);

	dr_cell = (struct of_drconf_cell_v1 *)p;

	for_each_drmem_lmb(lmb) {
		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

		dr_cell++;
	}

	of_update_property(memory, new_prop);
	return 0;
}

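/*
 * The ibm,dynamic-memory-v2 property is a compressed encoding: a cell
 * holding the number of LMB sets, followed by one of_drconf_cell_v2 per
 * set.  Each set describes seq_lmbs physically contiguous LMBs with
 * consecutive DRC indexes that share the same associativity index and
 * flags, so only the first LMB of the run is spelled out.
 */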
static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				struct drmem_lmb *lmb)
{
	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}

static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}

int drmem_update_dt(void)
{
	struct device_node *memory;
	struct property *prop;
	int rc = -1;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	/*
	 * Set in_drmem_update to prevent the notifier callback from
	 * processing the DT property back into the LMB tree, since the
	 * change originates from the LMB tree itself.
	 */
	in_drmem_update = true;
	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
	if (prop) {
		rc = drmem_update_dt_v1(memory, prop);
	} else {
		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			rc = drmem_update_dt_v2(memory, prop);
	}
	in_drmem_update = false;

	of_node_put(memory);
	return rc;
}

static void read_drconf_v1_cell(struct drmem_lmb *lmb,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	lmb->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	lmb->drc_index = of_read_number(p++, 1);

	p++; /* skip reserved field */

	lmb->aa_index = of_read_number(p++, 1);
	lmb->flags = of_read_number(p++, 1);

	*prop = p;
}

static int
__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct drmem_lmb lmb;
	u32 i, n_lmbs;
	int ret = 0;

	n_lmbs = of_read_number(prop++, 1);
	for (i = 0; i < n_lmbs; i++) {
		read_drconf_v1_cell(&lmb, &prop);
		ret = func(&lmb, &usm, data);
		if (ret)
			break;
	}

	return ret;
}

static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	dr_cell->seq_lmbs = of_read_number(p++, 1);
	dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	dr_cell->drc_index = of_read_number(p++, 1);
	dr_cell->aa_index = of_read_number(p++, 1);
	dr_cell->flags = of_read_number(p++, 1);

	*prop = p;
}

static int
__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;
	int ret = 0;

	lmb_sets = of_read_number(prop++, 1);
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			ret = func(&lmb, &usm, data);
			if (ret)
				break;
		}
	}

	return ret;
}

#ifdef CONFIG_PPC_PSERIES
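/*
 * Early-boot walker: parses the flattened device tree via
 * of_get_flat_dt_prop(), before the unflattened tree is available and
 * before the drmem_info->lmbs[] array has been allocated.
 */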
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
				 int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int len, ret = -ENODEV;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return ret;

	/* Get the address & size cells */
	n_root_addr_cells = dt_root_addr_cells;
	n_root_size_cells = dt_root_size_cells;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	memblock_dump_all();
	return ret;
}

/*
 * Update the LMB associativity index.
 */
static int update_lmb(struct drmem_lmb *updated_lmb,
		      __maybe_unused const __be32 **usm,
		      __maybe_unused void *data)
{
	struct drmem_lmb *lmb;

	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index != updated_lmb->drc_index)
			continue;

		lmb->aa_index = updated_lmb->aa_index;
		break;
	}
	return 0;
}

/*
 * Update the LMB associativity index.
 *
 * This needs to be called when the hypervisor is updating the
 * dynamic-reconfiguration-memory node property.
 */
void drmem_update_lmbs(struct property *prop)
{
	/*
	 * Don't update the LMBs if triggered by the update done in
	 * drmem_update_dt(); in that case the DT property was itself
	 * built from the LMB values.
	 */
	if (in_drmem_update)
		return;
	if (!strcmp(prop->name, "ibm,dynamic-memory"))
		__walk_drmem_v1_lmbs(prop->value, NULL, NULL, update_lmb);
	else if (!strcmp(prop->name, "ibm,dynamic-memory-v2"))
		__walk_drmem_v2_lmbs(prop->value, NULL, NULL, update_lmb);
}
#endif

static int init_drmem_lmb_size(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	if (drmem_info->lmb_size)
		return 0;

	prop = of_get_property(dn, "ibm,lmb-size", &len);
	if (!prop || len < n_root_size_cells * sizeof(__be32)) {
		pr_info("Could not determine LMB size\n");
		return -1;
	}

	drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
	return 0;
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;

	return prop;
}

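/*
 * Walk every LMB described under @dn, calling @func once per LMB until
 * it returns non-zero.  A minimal caller (hypothetical sketch, not an
 * in-tree user) might look like:
 *
 *	static int count_lmb(struct drmem_lmb *lmb, const __be32 **usm,
 *			     void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	walk_drmem_lmbs(dn, &nr, count_lmb);
 *
 * The callback's *usm points into linux,drconf-usable-memory when that
 * property is present (kexec/kdump kernels) and is NULL otherwise.
 */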
int walk_drmem_lmbs(struct device_node *dn, void *data,
		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int ret = -ENODEV;

	if (!of_root)
		return ret;

	/* Get the address & size cells */
	of_node_get(of_root);
	n_root_addr_cells = of_n_addr_cells(of_root);
	n_root_size_cells = of_n_size_cells(of_root);
	of_node_put(of_root);

	if (init_drmem_lmb_size(dn))
		return ret;

	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	return ret;
}

static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;

	drmem_info->n_lmbs = of_read_number(prop++, 1);
	if (drmem_info->n_lmbs == 0)
		return;

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	for_each_drmem_lmb(lmb)
		read_drconf_v1_cell(lmb, &prop);
}

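/*
 * Expand the compressed ibm,dynamic-memory-v2 encoding into the flat
 * drmem_info->lmbs[] array: the first pass over the LMB sets only counts
 * LMBs so the array can be sized, the second pass fills in one entry per
 * LMB of every set.
 */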
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;
		}
	}
}

static int __init drmem_init(void)
{
	struct device_node *dn;
	const __be32 *prop;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		pr_info("No dynamic reconfiguration memory found\n");
		return 0;
	}

	if (init_drmem_lmb_size(dn)) {
		of_node_put(dn);
		return 0;
	}

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		init_drmem_v1_lmbs(prop);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			init_drmem_v2_lmbs(prop);
	}

	of_node_put(dn);
	return 0;
}
late_initcall(drmem_init);