// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
#include <asm/kasan.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

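/*
 * Decode the CPU architecture version from the ID registers: the MIDR
 * format and, for the revised CPUID scheme, the MMFR0 VMSA/PMSA fields.
 * On CONFIG_CPU_V7M there is nothing to probe.
 */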
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

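/*
 * Determine whether the I-cache can alias: on ARMv7 this is the case when
 * one cache way (line size * number of sets) exceeds PAGE_SIZE; on ARMv6
 * the aliasing restriction is reported by the cache type register.
 */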
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

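/*
 * When the CPU has hardware integer divide, overwrite the first two
 * instructions of the __aeabi_uidiv/__aeabi_idiv helpers with a
 * udiv/sdiv followed by a return, then flush the patched range from
 * the I-cache.
 */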
419static void __init patch_aeabi_idiv(void)
420{
421 extern void __aeabi_uidiv(void);
422 extern void __aeabi_idiv(void);
423 uintptr_t fn_addr;
424 unsigned int mask;
425
426 mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
427 if (!(elf_hwcap & mask))
428 return;
429
430 pr_info("CPU: div instructions available: patching division code\n");
431
432 fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
Nicolas Pitre208fae52016-03-14 02:55:45 +0100433 asm ("" : "+g" (fn_addr));
Nicolas Pitre42f25bd2015-12-12 02:49:21 +0100434 ((u32 *)fn_addr)[0] = udiv_instruction();
435 ((u32 *)fn_addr)[1] = bx_lr_instruction();
436 flush_icache_range(fn_addr, fn_addr + 8);
437
438 fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
Nicolas Pitre208fae52016-03-14 02:55:45 +0100439 asm ("" : "+g" (fn_addr));
Nicolas Pitre42f25bd2015-12-12 02:49:21 +0100440 ((u32 *)fn_addr)[0] = sdiv_instruction();
441 ((u32 *)fn_addr)[1] = bx_lr_instruction();
442 flush_icache_range(fn_addr, fn_addr + 8);
443}
444
445#else
446static inline void patch_aeabi_idiv(void) { }
447#endif
448
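/* Derive extra hwcaps (hardware divide, LPAE, v8 crypto/CRC32) from the CPUID feature registers. */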
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l "l"
#define PLC_r "r"
#else
#define PLC_l "I"
#define PLC_r "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

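/*
 * Map the booting CPU to logical CPU 0 using its MPIDR affinity level 0
 * value, so that cpu_logical_map(0) always refers to the boot CPU.
 */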
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
		/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

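/*
 * Register a block of physical memory with memblock: round it to page
 * boundaries, drop anything below PHYS_OFFSET and, when phys_addr_t is
 * 32-bit, anything that is not addressable within 32 bits.
 */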
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

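/*
 * Populate /proc/iomem and /proc/ioports: System RAM ranges from memblock
 * (plus any boot alias), the kernel code/data resources, and the optional
 * machine-specific video RAM and legacy printer port regions.
 */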
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

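/*
 * setup_arch() is the architecture entry point for boot-time setup: it
 * identifies the CPU, selects the machine descriptor from the DT or ATAGs,
 * initialises memory, paging and the standard resources, and prepares SMP
 * and crash-kernel support before generic start_kernel() code continues.
 */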
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

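/* Emit the per-CPU details and machine information for /proc/cpuinfo. */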
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};