Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 2 | /* |
| 3 | * NUMA support for s390 |
| 4 | * |
| 5 | * Implement NUMA core code. |
| 6 | * |
| 7 | * Copyright IBM Corp. 2015 |
| 8 | */ |
| 9 | |
| 10 | #define KMSG_COMPONENT "numa" |
| 11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
| 12 | |
| 13 | #include <linux/kernel.h> |
| 14 | #include <linux/mmzone.h> |
| 15 | #include <linux/cpumask.h> |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 16 | #include <linux/memblock.h> |
| 17 | #include <linux/slab.h> |
| 18 | #include <linux/node.h> |
| 19 | |
| 20 | #include <asm/numa.h> |
| 21 | #include "numa_mode.h" |
| 22 | |
/*
 * Per-node pg_data_t pointers, indexed by node id; backing storage for
 * NODE_DATA().  Exported for use by modules via the mm headers.
 */
pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

/*
 * CPUs attached to each node.  Initially all possible CPUs are placed on
 * node 0 (see numa_setup()); the active mode may rearrange them later via
 * numa_update_cpu_topology().
 */
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
| 28 | |
/*
 * plain_setup() - Setup routine for the default single-node mode.
 *
 * Marks only node 0 as possible; since the plain mode provides no
 * __pfn_to_nid callback, all memory resolves to node 0 as well.
 */
static void plain_setup(void)
{
	node_set(0, node_possible_map);
}
| 33 | |
/* Mode descriptor for the default single-node ("plain") configuration. */
const struct numa_mode numa_mode_plain = {
	.name = "plain",
	.setup = plain_setup,
};

/*
 * Currently active NUMA mode.  Defaults to plain; may be switched via the
 * "numa=" early parameter (see parse_numa()).
 */
static const struct numa_mode *mode = &numa_mode_plain;
| 40 | |
| 41 | int numa_pfn_to_nid(unsigned long pfn) |
| 42 | { |
| 43 | return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0; |
| 44 | } |
| 45 | |
| 46 | void numa_update_cpu_topology(void) |
| 47 | { |
| 48 | if (mode->update_cpu_topology) |
| 49 | mode->update_cpu_topology(); |
| 50 | } |
| 51 | |
| 52 | int __node_distance(int a, int b) |
| 53 | { |
| 54 | return mode->distance ? mode->distance(a, b) : 0; |
| 55 | } |
Justin M. Forbes | a541f0e | 2018-10-31 13:02:03 -0500 | [diff] [blame] | 56 | EXPORT_SYMBOL(__node_distance); |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 57 | |
/*
 * Set to 1 by the "numa_debug" early parameter (see parse_debug()).
 * Consumers are elsewhere -- presumably mode implementations; not read
 * in this file.
 */
int numa_debug_enabled;
| 59 | |
/*
 * numa_setup_memory() - Assign bootmem to nodes
 *
 * The memory is first added to memblock without any respect to nodes.
 * This is fixed before remaining memblock memory is handed over to the
 * buddy allocator.
 * An important side effect is that large bootmem allocations might easily
 * cross node boundaries, which can be needed for large allocations with
 * smaller memory stripes in each node (i.e. when using NUMA emulation).
 *
 * Memory defines nodes:
 * Therefore this routine also sets the nodes online with memory.
 */
static void __init numa_setup_memory(void)
{
	unsigned long cur_base, align, end_of_dram;
	int nid = 0;

	end_of_dram = memblock_end_of_DRAM();
	/*
	 * Stripe size used to walk memory.  Modes without an align()
	 * callback get ULONG_MAX, i.e. a single stripe covering everything,
	 * so the loop below runs exactly once.
	 */
	align = mode->align ? mode->align() : ULONG_MAX;

	/*
	 * Step through all available memory and assign it to the nodes
	 * indicated by the mode implementation.
	 * All nodes which are seen here will be set online.
	 */
	cur_base = 0;
	do {
		nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
		node_set_online(nid);
		/*
		 * NOTE(review): the final stripe may extend past
		 * end_of_dram; presumably memblock_set_node() clips the
		 * range to existing memory -- confirm against memblock.
		 */
		memblock_set_node(cur_base, align, &memblock.memory, nid);
		cur_base += align;
	} while (cur_base < end_of_dram);

	/* Allocate and fill out node_data */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
		if (!NODE_DATA(nid))
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(pg_data_t), 8);
	}

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long t_start, t_end;
		int i;

		/* Find the lowest and highest pfn covered by this node. */
		start_pfn = ULONG_MAX;
		end_pfn = 0;
		for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
			if (t_start < start_pfn)
				start_pfn = t_start;
			if (t_end > end_pfn)
				end_pfn = t_end;
		}
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
		NODE_DATA(nid)->node_id = nid;
	}
}
| 119 | |
/*
 * numa_setup() - Earliest initialization
 *
 * Assign the mode and call the mode's setup routine.
 */
void __init numa_setup(void)
{
	pr_info("NUMA mode: %s\n", mode->name);
	/* Start from a clean slate; mode->setup() decides which nodes exist. */
	nodes_clear(node_possible_map);
	/* Initially attach all possible CPUs to node 0. */
	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
	if (mode->setup)
		mode->setup();
	numa_setup_memory();
	memblock_dump_all();
}
| 136 | |
/*
 * numa_init_late() - Initialization initcall
 *
 * Register NUMA nodes.
 *
 * Runs at arch_initcall time, i.e. after numa_setup() has brought the
 * nodes online.  Registers each online node with the driver core so it
 * appears under sysfs.
 */
static int __init numa_init_late(void)
{
	int nid;

	for_each_online_node(nid)
		register_one_node(nid);
	return 0;
}
arch_initcall(numa_init_late);
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 151 | |
/*
 * Handler for the "numa_debug" early parameter.  The parameter value
 * (@parm) is ignored; mere presence of the option enables debugging.
 */
static int __init parse_debug(char *parm)
{
	numa_debug_enabled = 1;
	return 0;
}
early_param("numa_debug", parse_debug);
| 158 | |
/*
 * Handler for the "numa=<mode>" early parameter.  Selects the NUMA mode
 * by name; "emu" is only available with CONFIG_NUMA_EMU.
 *
 * NOTE(review): unrecognized values are silently ignored and the current
 * (default) mode is kept -- confirm this is intentional rather than
 * returning non-zero to flag a malformed option.
 */
static int __init parse_numa(char *parm)
{
	if (strcmp(parm, numa_mode_plain.name) == 0)
		mode = &numa_mode_plain;
#ifdef CONFIG_NUMA_EMU
	if (strcmp(parm, numa_mode_emu.name) == 0)
		mode = &numa_mode_emu;
#endif
	return 0;
}
early_param("numa", parse_numa);