// SPDX-License-Identifier: GPL-2.0
/*
 * NUMA support for s390
 *
 * Implement NUMA core code.
 *
 * Copyright IBM Corp. 2015
 */

#define KMSG_COMPONENT "numa"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/node.h>

#include <asm/numa.h>
#include "numa_mode.h"

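/* Per-node pg_data_t pointers, allocated in numa_setup_memory() */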
pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

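/* Map of node id to the cpumask of CPUs attached to that node */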
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

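/*
 * Setup for the default "plain" mode: a single node 0 to which all
 * memory and CPUs belong.
 */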
static void plain_setup(void)
{
	node_set(0, node_possible_map);
}

const struct numa_mode numa_mode_plain = {
	.name = "plain",
	.setup = plain_setup,
};

static const struct numa_mode *mode = &numa_mode_plain;

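/*
 * numa_pfn_to_nid() - Map a page frame number to a node id
 *
 * Defer to the active mode; fall back to node 0 if the mode does not
 * implement __pfn_to_nid().
 */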
int numa_pfn_to_nid(unsigned long pfn)
{
	return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
}

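/*
 * numa_update_cpu_topology() - Let the active mode update the CPU topology
 *
 * No-op for modes without an update_cpu_topology() callback.
 */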
void numa_update_cpu_topology(void)
{
	if (mode->update_cpu_topology)
		mode->update_cpu_topology();
}

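/*
 * __node_distance() - Distance between two nodes
 *
 * Defer to the active mode; report distance 0 if the mode does not
 * provide a distance() callback.
 */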
int __node_distance(int a, int b)
{
	return mode->distance ? mode->distance(a, b) : 0;
}
EXPORT_SYMBOL(__node_distance);

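/* Set to 1 by the "numa_debug" early parameter, see parse_debug() */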
int numa_debug_enabled;

/*
 * numa_setup_memory() - Assign bootmem to nodes
 *
 * The memory is first added to memblock without any respect to nodes.
 * This is fixed before remaining memblock memory is handed over to the
 * buddy allocator.
 * An important side effect is that large bootmem allocations might easily
 * cross node boundaries, which can be needed for large allocations with
 * smaller memory stripes in each node (i.e. when using NUMA emulation).
 *
 * Memory defines nodes:
 * Therefore this routine also sets the nodes online with memory.
 */
static void __init numa_setup_memory(void)
{
	unsigned long cur_base, align, end_of_dram;
	int nid = 0;

	end_of_dram = memblock_end_of_DRAM();
	align = mode->align ? mode->align() : ULONG_MAX;

	/*
	 * Step through all available memory and assign it to the nodes
	 * indicated by the mode implementation.
	 * All nodes which are seen here will be set online.
	 */
	cur_base = 0;
	do {
		nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
		node_set_online(nid);
		memblock_set_node(cur_base, align, &memblock.memory, nid);
		cur_base += align;
	} while (cur_base < end_of_dram);

	/* Allocate and fill out node_data */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
		if (!NODE_DATA(nid))
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(pg_data_t), 8);
	}

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long t_start, t_end;
		int i;

		start_pfn = ULONG_MAX;
		end_pfn = 0;
		for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
			if (t_start < start_pfn)
				start_pfn = t_start;
			if (t_end > end_pfn)
				end_pfn = t_end;
		}
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
		NODE_DATA(nid)->node_id = nid;
	}
}

/*
 * numa_setup() - Earliest initialization
 *
 * Assign the mode and call the mode's setup routine.
 */
void __init numa_setup(void)
{
	pr_info("NUMA mode: %s\n", mode->name);
	nodes_clear(node_possible_map);
	/* Initially attach all possible CPUs to node 0. */
	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
	if (mode->setup)
		mode->setup();
	numa_setup_memory();
	memblock_dump_all();
}

/*
 * numa_init_late() - Initialization initcall
 *
 * Register NUMA nodes.
 */
static int __init numa_init_late(void)
{
	int nid;

	for_each_online_node(nid)
		register_one_node(nid);
	return 0;
}
arch_initcall(numa_init_late);

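/* Handle the "numa_debug" kernel command line parameter */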
static int __init parse_debug(char *parm)
{
	numa_debug_enabled = 1;
	return 0;
}
early_param("numa_debug", parse_debug);

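/*
 * Handle the "numa=" kernel command line parameter and select the
 * corresponding NUMA mode.
 */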
static int __init parse_numa(char *parm)
{
	if (strcmp(parm, numa_mode_plain.name) == 0)
		mode = &numa_mode_plain;
#ifdef CONFIG_NUMA_EMU
	if (strcmp(parm, numa_mode_emu.name) == 0)
		mode = &numa_mode_emu;
#endif
	return 0;
}
early_param("numa", parse_numa);