// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup.
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2G which is handled by the AMODE31
 * sections.
 * Because the AMODE31 sections are relocated below 2G at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with the new addresses after the relocation. The control registers are
 * first initialized in head64.S and updated again after the AMODE31
 * relocation. We must access the relevant AMODE31 tables indirectly via
 * pointers placed in the .amode31.refs linker section. Those pointers get
 * updated automatically during AMODE31 relocation and always contain a valid
 * address within the AMODE31 sections.
 */

static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
	[1] = 0xffffffffffffffff
};

static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0
};

static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
	0, 0, 0x89000000, 0,
	0, 0, 0x8a000000, 0
};

static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
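
/*
 * Reading aid (added, not part of the original source): the *_amode31
 * tables above are never referenced directly. Accesses go through the
 * __amode31_ref pointers above, e.g. setup_cr() does
 *
 *	__ctl_duct[1] = (unsigned long)__ctl_aste;
 *
 * so that once relocate_amode31_section() has fixed up the pointers,
 * such accesses automatically hit the relocated copies below 2 GB.
 */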

int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through, which is safe.
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what has to be set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);
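
/*
 * Usage note (added for illustration): booting with e.g. "condev=0x1f"
 * makes device number 0x001f the console device; values outside the
 * range 0..0xffff are ignored by condev_setup() above.
 */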

static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);
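
/*
 * Usage note (added for illustration): "conmode=3215", "conmode=3270"
 * and "conmode=sclp" (or its alias "hwc") select the console mode,
 * provided the matching console driver is configured; unrecognized
 * values leave the mode unchanged.
 */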

static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_CRASH_DUMP
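/*
 * Descriptive comment (added): if this kernel was IPLed as a zfcp/nvme
 * dumper, limit device recognition to the IPL and console devices via
 * cio_ignore and quiet the console. A set oldmem_data.start means this
 * is a kdump kernel instead, in which case nothing is done here.
 */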
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (oldmem_data.start)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

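/*
 * Descriptive comment (added): allocate a kernel stack. With
 * CONFIG_VMAP_STACK the stack comes THREAD_SIZE-aligned from the
 * vmalloc area; kmemleak_not_leak() keeps kmemleak from reporting it
 * as leaked. Without CONFIG_VMAP_STACK it comes from the linear
 * mapping instead.
 */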
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	void *ret;

	ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
			     NUMA_NO_NODE, __builtin_return_address(0));
	kmemleak_not_leak(ret);
	return (unsigned long)ret;
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

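/*
 * Descriptive comment (added): move the boot CPU off the early boot
 * stack onto a finally allocated kernel stack for the init task and
 * continue with rest_init() there; call_on_stack_noreturn() does not
 * return.
 */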
void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	call_on_stack_noreturn(rest_init, stack);
}

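/*
 * Descriptive comment (added): build the final lowcore for the boot CPU
 * while DAT is still off. The interrupt PSWs get PSW_MASK_DAT here only
 * for KASAN kernels; everyone else has DAT switched on later by
 * setup_lowcore_dat_on().
 */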
static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	unsigned long mcck_stack;
	struct lowcore *lc;

	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	nmi_alloc_boot_cpu(lc);
	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;

	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!mcck_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

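/*
 * Descriptive comment (added): once paging_init() has set up the kernel
 * page tables, the new PSWs may run with DAT enabled; also mirror the
 * control registers and the program-check PSW into the absolute zero
 * lowcore for the restart path.
 */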
static void __init setup_lowcore_dat_on(void)
{
	struct lowcore *lc = lowcore_ptr[0];

	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
	__ctl_set_bit(0, 28);
	mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
	mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
	memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
			sizeof(S390_lowcore.cregs_save_area));
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

static void __init setup_memory_end(void)
{
	memblock_remove(ident_map_size, ULONG_MAX);
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

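/* Registered in reserve_crashkernel() below (comment added for clarity). */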
static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area above identity mapping is protected
 */
static void __init reserve_above_ident_map(void)
{
	memblock_reserve(ident_map_size, ULONG_MAX);
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: oldmem_data.start;
	high = low + crash_size;
	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_phys_alloc_range(crash_size,
						       KEXEC_CRASH_MEM_ALIGN,
						       low, high);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb)) {
		memblock_free(crash_base, crash_size);
		return;
	}

	if (!oldmem_data.start && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_data.start || !initrd_data.size)
		return;
	initrd_start = initrd_data.start;
	initrd_end = initrd_start + initrd_data.size;
	memblock_reserve(initrd_data.start, initrd_data.size);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

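/*
 * Descriptive comment (added): the boot code may keep the extended
 * memory detection information in a separate buffer. Reserve that
 * buffer while it is still needed; it is given back by
 * free_mem_detect_info() once the memblock lists are populated.
 */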
static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	memblock_set_bottom_up(false);
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_data.start && initrd_data.size &&
	    !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_free(initrd_data.start, initrd_data.size);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(_end));

	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
	memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
	memblock_reserve(__amode31_base, __eamode31 - __samode31);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
}

static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}

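/*
 * Worked example with made-up addresses (added): with __samode31 at
 * 0x10000 and __amode31_base at 0x20000, amode31_offset is 0x10000 and
 * every pointer recorded in .amode31.refs is bumped by that amount, so
 * it keeps pointing at the same object inside the relocated section.
 */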
static void __init relocate_amode31_section(void)
{
	unsigned long amode31_size = __eamode31 - __samode31;
	long amode31_offset = __amode31_base - __samode31;
	long *ptr;

	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);

	/* Move original AMODE31 section to the new one */
	memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
	/* Zero out the old AMODE31 section to catch invalid accesses within it */
	memset((void *)__samode31, 0, amode31_size);

	/* Update all AMODE31 region references */
	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
		*ptr += amode31_offset;
}
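
/*
 * Reading aid for setup_cr() below (added, shifts inferred from the
 * code): CR2 and CR5 take a table origin shifted right by 6, matching
 * the 64-byte alignment of the DUCT and ASTE tables above; CR15 takes
 * the linkage stack address shifted right by 3.
 */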

/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	__ctl_duct[1] = (unsigned long)__ctl_aste;
	__ctl_duct[2] = (unsigned long)__ctl_aste;
	__ctl_duct[4] = (unsigned long)__ctl_duald;

	/* Update control registers CR2, CR5 and CR15 */
	__ctl_store(cr2.val, 2, 2);
	__ctl_store(cr5.val, 5, 5);
	__ctl_store(cr15.val, 15, 15);
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
	__ctl_load(cr2.val, 2, 2);
	__ctl_load(cr5.val, 5, 5);
	__ctl_load(cr15.val, 15, 15);
}

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
							    PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");

	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_above_ident_map();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();

	relocate_amode31_section();
	setup_cr();

	setup_uv();
	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(ident_map_size);
	vmcp_cma_reserve();
	if (MACHINE_HAS_EDAT2)
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}