// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

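/*
 * cpu_number is each CPU's own index; on x86, raw_smp_processor_id()
 * reads it via this_cpu_read().
 */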
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

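/*
 * this_cpu_off holds each CPU's offset into the percpu area; until
 * setup_per_cpu_areas() runs below, everything uses the boot offset.
 */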
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
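
/*
 * Rough sketch of the address math the tables above enable (the real
 * accessor macros expand differently but compute the same address):
 *
 *	per_cpu(var, cpu) is roughly
 *		*(typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu])
 */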

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
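
/*
 * E.g. a module's DEFINE_PER_CPU() variables are carved out of this
 * reserved part of the first chunk, so they stay within 32bit
 * relocation range of the percpu segment base.
 */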
55
Tejun Heo4518e6a2009-08-14 15:00:52 +090056#ifdef CONFIG_X86_32
Tejun Heo5f5d8402009-02-24 11:57:21 +090057/**
Tejun Heo89c92152009-02-24 11:57:21 +090058 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
59 *
60 * If NUMA is not configured or there is only one NUMA node available,
61 * there is no reason to consider NUMA. This function determines
62 * whether percpu allocation should consider NUMA or not.
63 *
64 * RETURNS:
65 * true if NUMA should be considered; otherwise, false.
66 */
67static bool __init pcpu_need_numa(void)
68{
Mike Rapoporta9ee6cf2021-06-28 19:43:01 -070069#ifdef CONFIG_NUMA
Tejun Heo89c92152009-02-24 11:57:21 +090070 pg_data_t *last = NULL;
71 unsigned int cpu;
72
73 for_each_possible_cpu(cpu) {
74 int node = early_cpu_to_node(cpu);
75
76 if (node_online(node) && NODE_DATA(node) &&
77 last && last != NODE_DATA(node))
78 return true;
79
80 last = NODE_DATA(node);
81 }
82#endif
83 return false;
84}
Tejun Heo4518e6a2009-08-14 15:00:52 +090085#endif

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NUMA
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = memblock_alloc_from(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = memblock_alloc_try_nid(size, align, goal,
					     MEMBLOCK_ALLOC_ACCESSIBLE,
					     node);

		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return memblock_alloc_from(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(ptr, size);
}

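/*
 * Used by the embed first-chunk allocator to group CPUs: CPUs on the
 * same NUMA node report LOCAL_DISTANCE and may share an allocation
 * group.
 */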
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

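/*
 * On 32bit the percpu area is reached through the %fs segment; point
 * this CPU's GDT_ENTRY_PERCPU descriptor at its percpu offset.
 */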
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and the vmalloc area isn't spacious enough
	 * on 32bit.  Use the page allocator in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in the vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocations can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
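	/*
	 * pcpu_base_addr is where the allocator actually placed the first
	 * chunk; delta rebases the link-time __per_cpu_start so each CPU's
	 * final offset is delta plus its unit offset within the chunk.
	 */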
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the
		 * boot cpu is on a node that doesn't have memory installed.
		 * Also, cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is
		 * set up later by c_init (i.e. intel_init/amd_init).
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .init.data area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
	 * swapper_pg_dir on 32-bit.  The per-cpu mappings need to be
	 * available there too.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}
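
/*
 * Usage sketch (illustrative only, not part of this file): once the
 * areas set up above are live, the ordinary percpu accessors work as
 * expected.  "hypothetical_counter" is a made-up example variable:
 *
 *	DEFINE_PER_CPU(int, hypothetical_counter);
 *
 *	this_cpu_inc(hypothetical_counter);
 *	pr_info("cpu%d count %d\n", smp_processor_id(),
 *		this_cpu_read(hypothetical_counter));
 */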