/*
 * NUMA support for s390
 *
 * NUMA emulation (aka fake NUMA) distributes the available memory to nodes
 * without using real topology information about the physical memory of the
 * machine.
 *
 * It distributes the available CPUs to nodes while respecting the original
 * machine topology information. This is done by trying to avoid separating
 * CPUs that reside on the same book or even on the same MC.
 *
 * Because the current Linux scheduler code requires a stable cpu to node
 * mapping, cores are pinned to nodes when the first CPU thread is set online.
 *
 * Copyright IBM Corp. 2015
 */
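
/*
 * Example: booting with "emu_nodes=4 emu_size=512M" (with this NUMA mode
 * selected) creates four emulated nodes and assigns memory to them in
 * 512 MB stripes; see the early_param() handlers at the end of this file.
 */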

#define KMSG_COMPONENT "numa_emu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include "numa_mode.h"
#include "toptree.h"

/* Distances between the different system components */
#define DIST_EMPTY	0
#define DIST_CORE	1
#define DIST_MC		2
#define DIST_BOOK	3
#define DIST_DRAWER	4
#define DIST_MAX	5
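
/*
 * The DIST_* values are ordered by increasing topological distance;
 * DIST_MAX is used as an upper bound when searching for the nearest
 * core (see dist_node_to_core()).
 */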

/* Node distance reported to common code */
#define EMU_NODE_DIST	10

/* Node ID for free (not yet pinned) cores */
#define NODE_ID_FREE	-1

/* Different levels of toptree */
enum toptree_level {CORE, MC, BOOK, DRAWER, NODE, TOPOLOGY};

/* The two toptree IDs */
enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};

/* Number of NUMA nodes */
static int emu_nodes = 1;
/* NUMA stripe size */
static unsigned long emu_size;

/*
 * Node to core pinning information updates are protected by
 * "sched_domains_mutex".
 */
static struct {
	s32 to_node_id[CONFIG_NR_CPUS];	/* Pinned core to node mapping */
	int total;			/* Total number of pinned cores */
	int per_node_target;		/* Cores per node without extra cores */
	int per_node[MAX_NUMNODES];	/* Number of cores pinned to node */
} *emu_cores;

/*
 * Pin a core to a node
 */
static void pin_core_to_node(int core_id, int node_id)
{
	if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) {
		emu_cores->per_node[node_id]++;
		emu_cores->to_node_id[core_id] = node_id;
		emu_cores->total++;
	} else {
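		/* A core must stay on the node it was first pinned to */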
		WARN_ON(emu_cores->to_node_id[core_id] != node_id);
	}
}

/*
 * Number of pinned cores of a node
 */
static int cores_pinned(struct toptree *node)
{
	return emu_cores->per_node[node->id];
}

/*
 * ID of the node where the core is pinned (or NODE_ID_FREE)
 */
static int core_pinned_to_node_id(struct toptree *core)
{
	return emu_cores->to_node_id[core->id];
}

/*
 * Number of cores in the tree that are not yet pinned
 */
static int cores_free(struct toptree *tree)
{
	struct toptree *core;
	int count = 0;

	toptree_for_each(core, tree, CORE) {
		if (core_pinned_to_node_id(core) == NODE_ID_FREE)
			count++;
	}
	return count;
}

/*
 * Return node of core
 */
static struct toptree *core_node(struct toptree *core)
{
	return core->parent->parent->parent->parent;
}

/*
 * Return drawer of core
 */
static struct toptree *core_drawer(struct toptree *core)
{
	return core->parent->parent->parent;
}

/*
 * Return book of core
 */
static struct toptree *core_book(struct toptree *core)
{
	return core->parent->parent;
}

/*
 * Return mc of core
 */
static struct toptree *core_mc(struct toptree *core)
{
	return core->parent;
}

/*
 * Distance between two cores
 */
static int dist_core_to_core(struct toptree *core1, struct toptree *core2)
{
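	/*
	 * Compare the topology top-down: the first level on which the two
	 * cores differ (drawer, then book, then MC) determines the distance.
	 */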
	if (core_drawer(core1)->id != core_drawer(core2)->id)
		return DIST_DRAWER;
	if (core_book(core1)->id != core_book(core2)->id)
		return DIST_BOOK;
	if (core_mc(core1)->id != core_mc(core2)->id)
		return DIST_MC;
	/* Same core or sibling on same MC */
	return DIST_CORE;
}

/*
 * Distance of a node to a core
 */
static int dist_node_to_core(struct toptree *node, struct toptree *core)
{
	struct toptree *core_node;
	int dist_min = DIST_MAX;

	toptree_for_each(core_node, node, CORE)
		dist_min = min(dist_min, dist_core_to_core(core_node, core));
	return dist_min == DIST_MAX ? DIST_EMPTY : dist_min;
}

/*
 * toptree_unify() deletes empty nodes; recreate them afterwards so that
 * all node IDs exist again.
 */
static void toptree_unify_tree(struct toptree *tree)
{
	int nid;

	toptree_unify(tree);
	for (nid = 0; nid < emu_nodes; nid++)
		toptree_get_child(tree, nid);
}

/*
 * Find the best/nearest node for a given core and ensure that no node
 * gets more than "emu_cores->per_node_target + extra" cores.
 */
static struct toptree *node_for_core(struct toptree *numa, struct toptree *core,
				     int extra)
{
	struct toptree *node, *node_best = NULL;
	int dist_cur, dist_best, cores_target;

	cores_target = emu_cores->per_node_target + extra;
	dist_best = DIST_MAX;
	node_best = NULL;
	toptree_for_each(node, numa, NODE) {
		/* Already pinned cores must use their nodes */
		if (core_pinned_to_node_id(core) == node->id) {
			node_best = node;
			break;
		}
		/* Skip nodes that already have enough cores */
		if (cores_pinned(node) >= cores_target)
			continue;
		dist_cur = dist_node_to_core(node, core);
		if (dist_cur < dist_best) {
			dist_best = dist_cur;
			node_best = node;
		}
	}
	return node_best;
}

/*
 * Find the best node for each core with respect to "extra" core count
 */
static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys,
				   int extra)
{
	struct toptree *node, *core, *tmp;

	toptree_for_each_safe(core, tmp, phys, CORE) {
		node = node_for_core(numa, core, extra);
		if (!node)
			return;
		toptree_move(core, node);
		pin_core_to_node(core->id, node->id);
	}
}

/*
 * Move structures of given level to specified NUMA node
 */
static void move_level_to_numa_node(struct toptree *node, struct toptree *phys,
				    enum toptree_level level, bool perfect)
{
	int cores_free, cores_target = emu_cores->per_node_target;
	struct toptree *cur, *tmp;

	toptree_for_each_safe(cur, tmp, phys, level) {
		cores_free = cores_target - toptree_count(node, CORE);
		if (perfect) {
			if (cores_free == toptree_count(cur, CORE))
				toptree_move(cur, node);
		} else {
			if (cores_free >= toptree_count(cur, CORE))
				toptree_move(cur, node);
		}
	}
}

/*
 * Move structures of a given level to NUMA nodes. If "perfect" is set,
 * move only structures that fit perfectly. Otherwise also move structures
 * that are smaller than needed.
 */
static void move_level_to_numa(struct toptree *numa, struct toptree *phys,
			       enum toptree_level level, bool perfect)
{
	struct toptree *node;

	toptree_for_each(node, numa, NODE)
		move_level_to_numa_node(node, phys, level, perfect);
}

/*
 * For the first run try to move the big structures (drawers, books, MCs)
 * as a whole
 */
static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys)
{
	struct toptree *core;

	/* Always try to move perfectly fitting structures first */
	move_level_to_numa(numa, phys, DRAWER, true);
	move_level_to_numa(numa, phys, DRAWER, false);
	move_level_to_numa(numa, phys, BOOK, true);
	move_level_to_numa(numa, phys, BOOK, false);
	move_level_to_numa(numa, phys, MC, true);
	move_level_to_numa(numa, phys, MC, false);
	/* Now pin all the moved cores */
	toptree_for_each(core, numa, CORE)
		pin_core_to_node(core->id, core_node(core)->id);
}

/*
 * Allocate new topology and create required nodes
 */
static struct toptree *toptree_new(int id, int nodes)
{
	struct toptree *tree;
	int nid;

	tree = toptree_alloc(TOPOLOGY, id);
	if (!tree)
		goto fail;
	for (nid = 0; nid < nodes; nid++) {
		if (!toptree_get_child(tree, nid))
			goto fail;
	}
	return tree;
fail:
	panic("NUMA emulation could not allocate topology");
}

/*
 * Allocate and initialize core to node mapping
 */
static void create_core_to_node_map(void)
{
	int i;

	emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
	if (emu_cores == NULL)
		panic("Could not allocate cores to node memory");
	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
		emu_cores->to_node_id[i] = NODE_ID_FREE;
}

/*
 * Move cores from physical topology into NUMA target topology
 * and try to keep as much of the physical topology as possible.
 */
static struct toptree *toptree_to_numa(struct toptree *phys)
{
	static int first = 1;
	struct toptree *numa;
	int cores_total;

	cores_total = emu_cores->total + cores_free(phys);
	emu_cores->per_node_target = cores_total / emu_nodes;
	numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes);
	if (first) {
		toptree_to_numa_first(numa, phys);
		first = 0;
	}
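	/*
	 * Two balancing passes: with extra=0, fill each node only up to the
	 * average core count; with extra=1, distribute the cores left over
	 * when the total is not divisible by the node count.
	 */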
	toptree_to_numa_single(numa, phys, 0);
	toptree_to_numa_single(numa, phys, 1);
	toptree_unify_tree(numa);

	WARN_ON(cpumask_weight(&phys->mask));
	return numa;
}

/*
 * Create a toptree out of the physical topology that we got from the
 * hypervisor
 */
static struct toptree *toptree_from_topology(void)
{
	struct toptree *phys, *node, *drawer, *book, *mc, *core;
	struct cpu_topology_s390 *top;
	int cpu;

	phys = toptree_new(TOPTREE_ID_PHYS, 1);

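	/*
	 * Insert each online CPU below the single root node, mirroring its
	 * physical drawer/book/socket position.
	 */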
	for_each_online_cpu(cpu) {
		top = &per_cpu(cpu_topology, cpu);
		node = toptree_get_child(phys, 0);
		drawer = toptree_get_child(node, top->drawer_id);
		book = toptree_get_child(drawer, top->book_id);
		mc = toptree_get_child(book, top->socket_id);
		core = toptree_get_child(mc, top->core_id);
		if (!drawer || !book || !mc || !core)
			panic("NUMA emulation could not allocate memory");
		cpumask_set_cpu(cpu, &core->mask);
		toptree_update_mask(mc);
	}
	return phys;
}

/*
 * Add toptree core to topology and create correct CPU masks
 */
static void topology_add_core(struct toptree *core)
{
	struct cpu_topology_s390 *top;
	int cpu;

	for_each_cpu(cpu, &core->mask) {
		top = &per_cpu(cpu_topology, cpu);
		cpumask_copy(&top->thread_mask, &core->mask);
		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
		cpumask_copy(&top->book_mask, &core_book(core)->mask);
		cpumask_copy(&top->drawer_mask, &core_drawer(core)->mask);
		cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
		top->node_id = core_node(core)->id;
	}
}

/*
 * Apply toptree to topology and create CPU masks
 */
static void toptree_to_topology(struct toptree *numa)
{
	struct toptree *core;
	int i;

	/* Clear all node masks */
	for (i = 0; i < MAX_NUMNODES; i++)
		cpumask_clear(&node_to_cpumask_map[i]);

	/* Rebuild all masks */
	toptree_for_each(core, numa, CORE)
		topology_add_core(core);
}

/*
 * Show the node to core mapping
 */
static void print_node_to_core_map(void)
{
	int nid, cid;

	if (!numa_debug_enabled)
		return;
	printk(KERN_DEBUG "NUMA node to core mapping\n");
	for (nid = 0; nid < emu_nodes; nid++) {
		printk(KERN_DEBUG " node %3d: ", nid);
		for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) {
			if (emu_cores->to_node_id[cid] == nid)
				printk(KERN_CONT "%d ", cid);
		}
		printk(KERN_CONT "\n");
	}
}

/*
 * Transfer physical topology into a NUMA topology and modify CPU masks
 * according to the NUMA topology.
 *
 * Must be called with "sched_domains_mutex" lock held.
 */
static void emu_update_cpu_topology(void)
{
	struct toptree *phys, *numa;

	if (emu_cores == NULL)
		create_core_to_node_map();
	phys = toptree_from_topology();
	numa = toptree_to_numa(phys);
	toptree_free(phys);
	toptree_to_topology(numa);
	toptree_free(numa);
	print_node_to_core_map();
}

/*
 * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum
 * alignment (needed for memory hotplug).
 */
static unsigned long emu_setup_size_adjust(unsigned long size)
{
	unsigned long size_new;

	size = size ? : CONFIG_EMU_SIZE;
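	/*
	 * Example (assuming 256 MB memory blocks): a requested 500 MB
	 * stripe is rounded up to 512 MB.
	 */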
	size_new = roundup(size, memory_block_size_bytes());
	if (size_new == size)
		return size;
	pr_warn("Increasing memory stripe size from %ld MB to %ld MB\n",
		size >> 20, size_new >> 20);
	return size_new;
}

/*
 * If there is not enough memory for the specified number of nodes, reduce
 * the node count.
 */
static int emu_setup_nodes_adjust(int nodes)
{
	int nodes_max;

	nodes_max = memblock.memory.total_size / emu_size;
	nodes_max = max(nodes_max, 1);
	if (nodes_max >= nodes)
		return nodes;
	pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes);
	return nodes_max;
}

/*
 * Early emu setup
 */
static void emu_setup(void)
{
	int nid;

	emu_size = emu_setup_size_adjust(emu_size);
	emu_nodes = emu_setup_nodes_adjust(emu_nodes);
	for (nid = 0; nid < emu_nodes; nid++)
		node_set(nid, node_possible_map);
	pr_info("Creating %d nodes with memory stripe size %ld MB\n",
		emu_nodes, emu_size >> 20);
}

/*
 * Return node id for given page number
 */
static int emu_pfn_to_nid(unsigned long pfn)
{
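	/*
	 * Memory is striped across nodes, e.g. with emu_size = 512 MB and
	 * emu_nodes = 4, stripe 0 maps to node 0, stripe 1 to node 1, ...,
	 * and stripe 4 wraps around to node 0 again.
	 */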
	return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes;
}

/*
 * Return stripe size
 */
static unsigned long emu_align(void)
{
	return emu_size;
}

/*
 * Return distance between two nodes
 */
static int emu_distance(int node1, int node2)
{
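	/* Flat distance matrix: 0 on the diagonal, EMU_NODE_DIST elsewhere */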
	return (node1 != node2) * EMU_NODE_DIST;
}

/*
 * Define callbacks for generic s390 NUMA infrastructure
 */
const struct numa_mode numa_mode_emu = {
	.name = "emu",
	.setup = emu_setup,
	.update_cpu_topology = emu_update_cpu_topology,
	.__pfn_to_nid = emu_pfn_to_nid,
	.align = emu_align,
	.distance = emu_distance,
};

/*
 * Kernel parameter: emu_nodes=<n>
 */
static int __init early_parse_emu_nodes(char *p)
{
	int count;

	if (kstrtoint(p, 0, &count) != 0 || count <= 0)
		return 0;
	emu_nodes = min(count, MAX_NUMNODES);
	return 0;
}
early_param("emu_nodes", early_parse_emu_nodes);

/*
 * Kernel parameter: emu_size=[<n>[k|M|G|T]]
 */
static int __init early_parse_emu_size(char *p)
{
	emu_size = memparse(p, NULL);
	return 0;
}
early_param("emu_size", early_parse_emu_size);