Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 1 | /* |
| 2 | * NUMA support for s390 |
| 3 | * |
| 4 | * Implement NUMA core code. |
| 5 | * |
| 6 | * Copyright IBM Corp. 2015 |
| 7 | */ |
| 8 | |
| 9 | #define KMSG_COMPONENT "numa" |
| 10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
| 11 | |
| 12 | #include <linux/kernel.h> |
| 13 | #include <linux/mmzone.h> |
| 14 | #include <linux/cpumask.h> |
| 15 | #include <linux/bootmem.h> |
| 16 | #include <linux/memblock.h> |
| 17 | #include <linux/slab.h> |
| 18 | #include <linux/node.h> |
| 19 | |
| 20 | #include <asm/numa.h> |
| 21 | #include "numa_mode.h" |
| 22 | |
/* Per-node pg_data_t pointers; filled in by numa_setup_memory(). */
pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

/* Per-node mask of attached CPUs; all CPUs start on node 0 (numa_init_early). */
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
| 28 | |
Heiko Carstens | ef4423ce | 2016-07-28 18:14:29 +0200 | [diff] [blame] | 29 | static void plain_setup(void) |
| 30 | { |
| 31 | node_set(0, node_possible_map); |
| 32 | } |
| 33 | |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 34 | const struct numa_mode numa_mode_plain = { |
| 35 | .name = "plain", |
Heiko Carstens | ef4423ce | 2016-07-28 18:14:29 +0200 | [diff] [blame] | 36 | .setup = plain_setup, |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 37 | }; |
| 38 | |
| 39 | static const struct numa_mode *mode = &numa_mode_plain; |
| 40 | |
| 41 | int numa_pfn_to_nid(unsigned long pfn) |
| 42 | { |
| 43 | return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0; |
| 44 | } |
| 45 | |
| 46 | void numa_update_cpu_topology(void) |
| 47 | { |
| 48 | if (mode->update_cpu_topology) |
| 49 | mode->update_cpu_topology(); |
| 50 | } |
| 51 | |
| 52 | int __node_distance(int a, int b) |
| 53 | { |
| 54 | return mode->distance ? mode->distance(a, b) : 0; |
| 55 | } |
| 56 | |
| 57 | int numa_debug_enabled; |
| 58 | |
| 59 | /* |
| 60 | * alloc_node_data() - Allocate node data |
| 61 | */ |
| 62 | static __init pg_data_t *alloc_node_data(void) |
| 63 | { |
| 64 | pg_data_t *res; |
| 65 | |
Heiko Carstens | ef1f7fd | 2016-01-15 14:50:25 +0100 | [diff] [blame] | 66 | res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8); |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 67 | memset(res, 0, sizeof(pg_data_t)); |
| 68 | return res; |
| 69 | } |
| 70 | |
| 71 | /* |
| 72 | * numa_setup_memory() - Assign bootmem to nodes |
| 73 | * |
| 74 | * The memory is first added to memblock without any respect to nodes. |
| 75 | * This is fixed before remaining memblock memory is handed over to the |
| 76 | * buddy allocator. |
| 77 | * An important side effect is that large bootmem allocations might easily |
| 78 | * cross node boundaries, which can be needed for large allocations with |
| 79 | * smaller memory stripes in each node (i.e. when using NUMA emulation). |
| 80 | * |
| 81 | * Memory defines nodes: |
| 82 | * Therefore this routine also sets the nodes online with memory. |
| 83 | */ |
static void __init numa_setup_memory(void)
{
	unsigned long cur_base, align, end_of_dram;
	int nid = 0;

	end_of_dram = memblock_end_of_DRAM();
	/*
	 * Stripe size for the walk below.  Modes without an align callback
	 * (i.e. plain mode) use ULONG_MAX so the loop runs exactly once and
	 * assigns all of memory to node 0.
	 */
	align = mode->align ? mode->align() : ULONG_MAX;

	/*
	 * Step through all available memory and assign it to the nodes
	 * indicated by the mode implementation.
	 * All nodes which are seen here will be set online.
	 */
	cur_base = 0;
	do {
		nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
		node_set_online(nid);
		memblock_set_node(cur_base, align, &memblock.memory, nid);
		/* Unsigned wrap (0 + ULONG_MAX) terminates the loop for plain mode. */
		cur_base += align;
	} while (cur_base < end_of_dram);

	/* Allocate and fill out node_data */
	/* Every node, online or not, gets a pg_data_t so NODE_DATA() is never NULL. */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		NODE_DATA(nid) = alloc_node_data();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long t_start, t_end;
		int i;

		/* Node span = [lowest, highest) pfn over all its memblock ranges. */
		start_pfn = ULONG_MAX;
		end_pfn = 0;
		for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
			if (t_start < start_pfn)
				start_pfn = t_start;
			if (t_end > end_pfn)
				end_pfn = t_end;
		}
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
		NODE_DATA(nid)->node_id = nid;
	}
}
| 126 | |
| 127 | /* |
| 128 | * numa_setup() - Earliest initialization |
| 129 | * |
| 130 | * Assign the mode and call the mode's setup routine. |
| 131 | */ |
void __init numa_setup(void)
{
	pr_info("NUMA mode: %s\n", mode->name);
	/* Start from a clean slate; mode->setup() marks the possible nodes. */
	nodes_clear(node_possible_map);
	if (mode->setup)
		mode->setup();
	numa_setup_memory();
	memblock_dump_all();
}
| 141 | |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 142 | /* |
| 143 | * numa_init_early() - Initialization initcall |
| 144 | * |
| 145 | * This runs when only one CPU is online and before the first |
| 146 | * topology update is called for by the scheduler. |
| 147 | */ |
| 148 | static int __init numa_init_early(void) |
| 149 | { |
| 150 | /* Attach all possible CPUs to node 0 for now. */ |
Martin Schwidefsky | 22be9cd | 2015-09-22 14:21:16 +0200 | [diff] [blame] | 151 | cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 152 | return 0; |
| 153 | } |
| 154 | early_initcall(numa_init_early); |
| 155 | |
| 156 | /* |
| 157 | * numa_init_late() - Initialization initcall |
| 158 | * |
| 159 | * Register NUMA nodes. |
| 160 | */ |
| 161 | static int __init numa_init_late(void) |
| 162 | { |
| 163 | int nid; |
| 164 | |
| 165 | for_each_online_node(nid) |
| 166 | register_one_node(nid); |
| 167 | return 0; |
| 168 | } |
Michael Holzheu | 2d0f76a | 2016-01-20 19:22:16 +0100 | [diff] [blame] | 169 | arch_initcall(numa_init_late); |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 170 | |
/* Handle the "numa_debug" kernel parameter: enable NUMA debug output. */
static int __init parse_debug(char *parm)
{
	numa_debug_enabled = 1;
	return 0;
}
early_param("numa_debug", parse_debug);
| 177 | |
| 178 | static int __init parse_numa(char *parm) |
| 179 | { |
| 180 | if (strcmp(parm, numa_mode_plain.name) == 0) |
| 181 | mode = &numa_mode_plain; |
Michael Holzheu | c29a7ba | 2014-03-06 18:47:21 +0100 | [diff] [blame] | 182 | #ifdef CONFIG_NUMA_EMU |
| 183 | if (strcmp(parm, numa_mode_emu.name) == 0) |
| 184 | mode = &numa_mode_emu; |
| 185 | #endif |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 186 | return 0; |
| 187 | } |
| 188 | early_param("numa", parse_numa); |