// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

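/* This CPU's number; on x86, raw_smp_processor_id() reads it via this_cpu_read(). */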
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

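/*
 * Initial percpu offset for the boot CPU.  On 64-bit the percpu section
 * is zero-based and its init copy lives at __per_cpu_load, so that is
 * the offset until the real areas are allocated.  On 32-bit, percpu
 * variables are linked at their final addresses, so the offset is 0.
 */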
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

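/*
 * Per-CPU copy of the CPU's own percpu offset; the segment-relative
 * this_cpu accessors add it to a percpu variable's canonical address.
 */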
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

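/*
 * Offset of each CPU's percpu area from the original percpu section;
 * per_cpu(var, cpu) adds __per_cpu_offset[cpu] to the variable's address.
 * Seeded with BOOT_PERCPU_OFFSET so early references work before
 * setup_per_cpu_areas() fills in the real values.
 */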
unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NUMA
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA friendly memblock wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NUMA
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = memblock_alloc_from(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = memblock_alloc_try_nid(size, align, goal,
					     MEMBLOCK_ALLOC_ACCESSIBLE,
					     node);

		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return memblock_alloc_from(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(ptr, size);
}

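/*
 * Crude NUMA distance metric for the embed allocator: CPUs on the same
 * node are LOCAL_DISTANCE apart, everything else is REMOTE_DISTANCE.
 * pcpu_embed_first_chunk() uses this to group same-node CPUs into a
 * common allocation group.
 */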
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

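/*
 * Page table population callback for pcpu_page_first_chunk(): ensure a
 * PTE exists for @addr in the kernel page tables so a percpu page can
 * be mapped there.
 */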
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

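/*
 * On 32-bit, percpu data is addressed through the %fs segment.  Install
 * this CPU's percpu offset as the base of a present, ring-0, writable
 * data segment (flags 0x8092, 4 GiB limit) in its GDT.  On 64-bit the
 * %gs base is set via MSR instead, so there is nothing to do here.
 */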
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

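/*
 * Allocate the first percpu chunk (embed allocator where possible, page
 * allocator otherwise), record each CPU's offset, and migrate the early
 * boot-time per-cpu maps into the real percpu areas.
 */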
void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
	 * swapper_pg_dir on 32-bit.  The per-cpu mappings need to be
	 * available there too.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}