blob: 3364ebfae21568e9fb48c21e03c44cabb35e0214 [file] [log] [blame]
Greg Kroah-Hartmana17ae4c2017-11-24 15:00:32 +01001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 * S390 version
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 1999, 2012
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 * Author(s): Hartmut Penner (hp@de.ibm.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 *
8 * Derived from "arch/i386/kernel/setup.c"
9 * Copyright (C) 1995, Linus Torvalds
10 */
11
12/*
13 * This file handles the architecture-dependent parts of initialization
14 */
15
Martin Schwidefsky3b6ed4a2008-12-25 13:39:40 +010016#define KMSG_COMPONENT "setup"
17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
18
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/errno.h>
Heiko Carstens08729222013-01-07 13:56:17 +010020#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/sched.h>
Ingo Molnar29930022017-02-08 18:51:36 +010022#include <linux/sched/task.h>
Ingo Molnar1777e462017-02-05 14:47:12 +010023#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/kernel.h>
Tejun Heoff38df32011-12-08 10:22:09 -080025#include <linux/memblock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/mm.h>
27#include <linux/stddef.h>
28#include <linux/unistd.h>
29#include <linux/ptrace.h>
Martin Schwidefskybcfcbb62014-08-11 12:20:58 +020030#include <linux/random.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/user.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/tty.h>
33#include <linux/ioport.h>
34#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/init.h>
36#include <linux/initrd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/root_dev.h>
38#include <linux/console.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel_stat.h>
Christoph Hellwig0b1abd12020-09-11 10:56:52 +020040#include <linux/dma-map-ops.h>
Heiko Carstens1e8e3382005-10-30 15:00:11 -080041#include <linux/device.h>
Peter Oberparleiter585c3042006-06-29 15:08:25 +020042#include <linux/notifier.h>
Heiko Carstens65912a82006-09-20 15:58:41 +020043#include <linux/pfn.h>
Hongjie Yangfe355b72007-02-05 21:18:24 +010044#include <linux/ctype.h>
Heiko Carstens2b67fc42007-02-05 21:16:47 +010045#include <linux/reboot.h>
Heiko Carstensdbd70fb2008-04-17 07:46:12 +020046#include <linux/topology.h>
Michael Holzheu60a0c682011-10-30 15:16:40 +010047#include <linux/kexec.h>
48#include <linux/crash_dump.h>
49#include <linux/memory.h>
Heiko Carstens048cd4e2012-02-27 10:01:52 +010050#include <linux/compat.h>
Martin Schwidefskyce3dc442017-09-12 16:37:33 +020051#include <linux/start_kernel.h>
Gerald Schaefer343dbdb2020-12-08 19:47:15 +010052#include <linux/hugetlb.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Martin Schwidefsky9641b8c2019-02-21 14:23:04 +010054#include <asm/boot_data.h>
Michael Holzheu46b05d22007-02-21 10:55:21 +010055#include <asm/ipl.h>
Heiko Carstens1e3cab22012-03-30 09:40:55 +020056#include <asm/facility.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070057#include <asm/smp.h>
58#include <asm/mmu_context.h>
59#include <asm/cpcmd.h>
60#include <asm/lowcore.h>
Martin Schwidefsky6c815112017-10-12 13:24:47 +020061#include <asm/nmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062#include <asm/irq.h>
Peter Oberparleiter0b642ed2005-05-01 08:58:58 -070063#include <asm/page.h>
64#include <asm/ptrace.h>
Heiko Carstenscc13ad62006-06-25 05:49:30 -070065#include <asm/sections.h>
Hongjie Yangfe355b72007-02-05 21:18:24 +010066#include <asm/ebcdic.h>
Michael Holzheu60a0c682011-10-30 15:16:40 +010067#include <asm/diag.h>
Michael Holzheu4857d4b2012-03-11 11:59:34 -040068#include <asm/os_info.h>
Heinz Graalfscd183452012-06-11 16:06:59 +020069#include <asm/sclp.h>
Martin Schwidefsky78c98f92019-01-28 08:33:08 +010070#include <asm/stacktrace.h>
Martin Schwidefskybcfcbb62014-08-11 12:20:58 +020071#include <asm/sysinfo.h>
Philipp Hachtmann3a368f72014-03-06 18:25:13 +010072#include <asm/numa.h>
Vasily Gorbik686140a2017-10-12 13:01:47 +020073#include <asm/alternative.h>
Martin Schwidefskyf19fbd52018-01-26 12:46:47 +010074#include <asm/nospec-branch.h>
Vasily Gorbik6966d602018-04-11 11:56:55 +020075#include <asm/mem_detect.h>
Vasily Gorbik5abb9352019-04-01 19:11:03 +020076#include <asm/uv.h>
Sven Schnelle0b38b5e2020-01-22 13:38:22 +010077#include <asm/asm-offsets.h>
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -040078#include "entry.h"
Gerald Schaeferc1821c22007-02-05 21:18:17 +010079
Linus Torvalds1da177e2005-04-16 15:20:36 -070080/*
81 * Machine setup..
82 */
/*
 * Console selection, set up from the "conmode="/"condev=" kernel
 * parameters or auto-detected in conmode_default().
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

/* Console device number (-1 = not set); see condev_setup(). */
unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

/* Console subchannel number (-1 = not set); derived via "QUERY CONSOLE". */
unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);
91
/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
unsigned long __amode31_ref __samode31 = __pa(&_samode31);
unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2G which is handled by the AMODE31
 * sections.
 * Because the AMODE31 sections are relocated below 2G at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with new addresses after the relocation. The initial initialization of
 * control registers occurs in head64.S and then gets updated again after AMODE31
 * relocation. We must access the relevant AMODE31 tables indirectly via
 * pointers placed in the .amode31.refs linker section. Those pointers get
 * updated automatically during AMODE31 relocation and always contain a valid
 * address within AMODE31 sections.
 */

/* Dispatchable-unit control table; zero-initialized, 64-byte aligned. */
static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

/*
 * ASN-second-table entry; word 1 is all-ones.
 * NOTE(review): exact field semantics per the z/Architecture PoP - confirm.
 */
static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
	[1] = 0xffffffffffffffff
};

/* Eight 16-byte entries, each starting with 0x80000000; 128-byte aligned. */
static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0
};

/* Linkage-stack initialization pattern (entry-type words at index 2 and 6). */
static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
	0, 0, 0x89000000, 0,
	0, 0, 0x8a000000, 0
};

/*
 * Indirection pointers kept in .amode31.refs so they are fixed up
 * automatically when the AMODE31 section is relocated below 2G.
 */
static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
Alexander Egorenkov6bda6672021-06-15 19:17:36 +0200144
/* Values handed over from the early boot (decompressor) stage. */
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);

/* Values preserved across kexec/kdump into the new kernel. */
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

/* Runtime-determined layout of the vmalloc area. */
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

/* Runtime-determined module area boundaries. */
unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through which is safe
 */
unsigned long mio_wb_bit_mask __ro_after_init;
180
181/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 * This is set up by the setup-routine at boot-time
183 * for S390 need to find out, what we have to setup
184 * using address 0x10400 ...
185 */
186
187#include <asm/setup.h>
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190 * condev= and conmode= setup parameter.
191 */
192
193static int __init condev_setup(char *str)
194{
195 int vdev;
196
197 vdev = simple_strtoul(str, &str, 0);
198 if (vdev >= 0 && vdev < 65536) {
199 console_devno = vdev;
200 console_irq = -1;
201 }
202 return 1;
203}
204
205__setup("condev=", condev_setup);
206
Hendrik Brueckner637952c2009-08-23 18:09:06 +0200207static void __init set_preferred_console(void)
208{
Peter Oberparleiter8f50af42016-07-07 07:52:38 +0200209 if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
Hendrik Brueckner637952c2009-08-23 18:09:06 +0200210 add_preferred_console("ttyS", 0, NULL);
Hendrik Bruecknerc4de0c12009-09-11 10:28:56 +0200211 else if (CONSOLE_IS_3270)
Hendrik Brueckner637952c2009-08-23 18:09:06 +0200212 add_preferred_console("tty3270", 0, NULL);
Peter Oberparleiter8f50af42016-07-07 07:52:38 +0200213 else if (CONSOLE_IS_VT220)
Valentin Vidicb7d91d22021-04-27 21:40:10 +0200214 add_preferred_console("ttysclp", 0, NULL);
Peter Oberparleiter8f50af42016-07-07 07:52:38 +0200215 else if (CONSOLE_IS_HVC)
216 add_preferred_console("hvc", 0, NULL);
Hendrik Brueckner637952c2009-08-23 18:09:06 +0200217}
218
/*
 * Parse the "conmode=" kernel parameter: explicitly choose the console
 * mode. Each choice is honored only if the matching driver is built in.
 */
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	/* Propagate the choice to the preferred-console list right away. */
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);
238
/*
 * Pick a default console mode when none was given on the command line.
 * Under z/VM the CP "QUERY CONSOLE"/"QUERY TERM" commands are used to
 * find the console device and current CONMODE; under KVM the SCLP
 * capabilities decide; otherwise fall back to SCLP if built in.
 */
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		/* Device number follows the 5-char "CONS " prefix. */
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		/*
		 * NOTE(review): ptr is used without a NULL check here -
		 * presumably CP always reports "SUBCHANNEL =" for the
		 * console; confirm against the CP QUERY CONSOLE output.
		 */
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		/* ptr + 8 skips "CONMODE " to the reported mode string. */
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}
295
#ifdef CONFIG_CRASH_DUMP
/*
 * Prepare for a zfcp/nvme dump: restrict CIO to the IPL and console
 * devices and quiet the console. Skipped when this is not a dump IPL
 * or when running as a kdump kernel (oldmem_data.start set).
 */
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (oldmem_data.start)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */
Michael Holzheu411ed322007-04-27 16:01:49 +0200309
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310 /*
311 * Reboot, halt and power_off stubs. They just call _machine_restart,
312 * _machine_halt or _machine_power_off.
313 */
314
315void machine_restart(char *command)
316{
Christian Borntraeger7aa8dac2007-11-20 11:13:31 +0100317 if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
Martin Schwidefsky06fa46a2006-06-29 14:57:32 +0200318 /*
319 * Only unblank the console if we are called in enabled
320 * context or a bust_spinlocks cleared the way for us.
321 */
322 console_unblank();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323 _machine_restart(command);
324}
325
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326void machine_halt(void)
327{
Martin Schwidefsky06fa46a2006-06-29 14:57:32 +0200328 if (!in_interrupt() || oops_in_progress)
329 /*
330 * Only unblank the console if we are called in enabled
331 * context or a bust_spinlocks cleared the way for us.
332 */
333 console_unblank();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334 _machine_halt();
335}
336
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337void machine_power_off(void)
338{
Martin Schwidefsky06fa46a2006-06-29 14:57:32 +0200339 if (!in_interrupt() || oops_in_progress)
340 /*
341 * Only unblank the console if we are called in enabled
342 * context or a bust_spinlocks cleared the way for us.
343 */
344 console_unblank();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 _machine_power_off();
346}
347
/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

/* Global PSW-restart stack, allocated in setup_lowcore_dat_off(). */
void *restart_stack;
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -0400355
/*
 * Allocate a THREAD_SIZE kernel stack. With CONFIG_VMAP_STACK the
 * stack comes from vmalloc space (THREAD_SIZE aligned); otherwise
 * physically contiguous pages are used.
 * Returns the stack base address, or 0 on allocation failure.
 */
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
			THREADINFO_GFP, NUMA_NO_NODE,
			__builtin_return_address(0));
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}
366
/*
 * Free a stack obtained from stack_alloc(), using the matching
 * deallocator for the configured stack backing (vmalloc or pages).
 */
void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}
375
376int __init arch_early_irq_init(void)
377{
378 unsigned long stack;
379
Vasily Gorbik32ce55a2018-09-18 18:23:40 +0200380 stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
Martin Schwidefskyce3dc442017-09-12 16:37:33 +0200381 if (!stack)
382 panic("Couldn't allocate async stack");
383 S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
384 return 0;
385}
386
/*
 * Switch the boot CPU from the initial stack to a freshly allocated
 * kernel stack and continue with rest_init() on it. Does not return.
 */
void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	/* Point lowcore at the usable top of the new stack. */
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	call_on_stack_noreturn(rest_init, stack);
}
403
/*
 * Build and install the boot CPU's lowcore while DAT is still off.
 * Sets up the interrupt new-PSWs, kernel stacks (restart, machine
 * check), timers copied from the current lowcore, and mirrors the
 * restart information into the absolute zero lowcore.
 */
static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	unsigned long mcck_stack;
	struct lowcore *lc;

	/* With KASAN the interrupt handlers already need DAT enabled. */
	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	/* Machine check PSW deliberately has no PSW_MASK_MCHECK set. */
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	/* Carry over state accumulated in the early (boot) lowcore. */
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	nmi_alloc_boot_cpu(lc);
	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!mcck_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;

	/* Activate the new lowcore for this CPU. */
	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}
494
/*
 * Enable DAT in the interrupt new-PSWs once the page tables are ready.
 * CR0 bit 28 is toggled around the update; NOTE(review): presumably
 * this prevents interrupts being taken with a half-updated PSW mask -
 * confirm against the architecture documentation.
 */
static void __init setup_lowcore_dat_on(void)
{
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_set_bit(0, 28);
}
504
/* Kernel image resources, registered under "System RAM" in setup_resources(). */
static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};
525
/*
 * Register every memblock memory range as a "System RAM" resource and
 * nest the kernel code/data/bss resources inside the ranges that
 * contain them, splitting a standard resource when it spans a range
 * boundary. Also re-adds the crash kernel region when configured.
 */
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			/* Skip standard resources not starting in this range. */
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				/*
				 * The standard resource spans past this range:
				 * register the overlapping part here and leave
				 * the remainder for a following range.
				 */
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}
591
/*
 * Trim memblock to the identity-mapped size and derive the maximum
 * page frame number from it.
 */
static void __init setup_memory_end(void)
{
	memblock_remove(ident_map_size, ULONG_MAX);
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
598
Michael Holzheu60a0c682011-10-30 15:16:40 +0100599#ifdef CONFIG_CRASH_DUMP
600
601/*
David Hildenbrand47656002020-04-24 10:39:04 +0200602 * When kdump is enabled, we have to ensure that no memory from the area
603 * [0 - crashkernel memory size] is set offline - it will be exchanged with
604 * the crashkernel memory region when kdump is triggered. The crashkernel
605 * memory region can never get offlined (pages are unmovable).
Michael Holzheu60a0c682011-10-30 15:16:40 +0100606 */
607static int kdump_mem_notifier(struct notifier_block *nb,
608 unsigned long action, void *data)
609{
610 struct memory_notify *arg = data;
611
Michael Holzheubd858e82014-07-10 18:14:20 +0200612 if (action != MEM_GOING_OFFLINE)
613 return NOTIFY_OK;
Michael Holzheu60a0c682011-10-30 15:16:40 +0100614 if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
615 return NOTIFY_BAD;
David Hildenbrand47656002020-04-24 10:39:04 +0200616 return NOTIFY_OK;
Michael Holzheu60a0c682011-10-30 15:16:40 +0100617}
618
/* Notifier that vetoes offlining of the memory kdump exchanges with */
static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};
622
623#endif
624
/*
 * Make sure that the area above identity mapping is protected:
 * reserve everything from ident_map_size upwards so memblock
 * never hands it out.
 */
static void __init reserve_above_ident_map(void)
{
	memblock_reserve(ident_map_size, ULONG_MAX);
}
632
/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	/* Parse "crashkernel=" from the command line; rc != 0 on failure */
	rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	/* The crash kernel is loaded into the first chunk, so it must fit */
	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	/* Prefer the requested base, else the old kernel's crash area */
	low = crash_base ?: oldmem_data.start;
	high = low + crash_size;
	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	/* From here on the low range must never be offlined */
	if (register_memory_notifier(&kdump_mem_nb))
		return;

	/* NOTE(review): diag10 presumably releases the pages to z/VM — confirm */
	if (!oldmem_data.start && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	/* Remove (not just reserve) so the area is re-added in setup_resources() */
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}
698
Philipp Hachtmann50be6342014-01-29 18:16:01 +0100699/*
700 * Reserve the initrd from being used by memblock
701 */
702static void __init reserve_initrd(void)
703{
704#ifdef CONFIG_BLK_DEV_INITRD
Alexander Egorenkov84733282021-06-15 14:15:07 +0200705 if (!initrd_data.start || !initrd_data.size)
Heiko Carstens7be5e352016-12-27 14:47:42 +0100706 return;
Alexander Egorenkov84733282021-06-15 14:15:07 +0200707 initrd_start = initrd_data.start;
708 initrd_end = initrd_start + initrd_data.size;
709 memblock_reserve(initrd_data.start, initrd_data.size);
Philipp Hachtmann50be6342014-01-29 18:16:01 +0100710#endif
711}
712
Martin Schwidefsky9641b8c2019-02-21 14:23:04 +0100713/*
714 * Reserve the memory area used to pass the certificate lists
715 */
716static void __init reserve_certificate_list(void)
717{
718 if (ipl_cert_list_addr)
719 memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
720}
721
Vasily Gorbik6966d602018-04-11 11:56:55 +0200722static void __init reserve_mem_detect_info(void)
723{
724 unsigned long start, size;
725
726 get_mem_detect_reserved(&start, &size);
727 if (size)
728 memblock_reserve(start, size);
729}
730
731static void __init free_mem_detect_info(void)
732{
733 unsigned long start, size;
734
735 get_mem_detect_reserved(&start, &size);
736 if (size)
737 memblock_free(start, size);
738}
739
/* Map mem_detect.info_source to a human-readable name for logging. */
static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	/* Unknown/unset source */
	return "none";
}
754
/*
 * Feed all ranges found by early memory detection into memblock
 * (both the "memory" and the "physmem" lists).
 */
static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	/* restore default top-down allocation for the rest of boot */
	memblock_set_bottom_up(false);
	/* single node: assign all of memory to node 0 */
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
	memblock_dump_all();
}
772
Philipp Hachtmann50be6342014-01-29 18:16:01 +0100773/*
774 * Check for initrd being in usable memory
775 */
776static void __init check_initrd(void)
777{
778#ifdef CONFIG_BLK_DEV_INITRD
Alexander Egorenkov84733282021-06-15 14:15:07 +0200779 if (initrd_data.start && initrd_data.size &&
780 !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
Martin Schwidefsky6d7b2ee2016-12-13 16:19:11 +0100781 pr_err("The initial RAM disk does not fit into the memory\n");
Alexander Egorenkov84733282021-06-15 14:15:07 +0200782 memblock_free(initrd_data.start, initrd_data.size);
Philipp Hachtmann50be6342014-01-29 18:16:01 +0100783 initrd_start = initrd_end = 0;
784 }
785#endif
786}
787
/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	/* first page frame past the kernel image end */
	unsigned long start_pfn = PFN_UP(__pa(_end));

	/* [0, STARTUP_NORMAL_OFFSET): lowcore and early boot data */
	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
	/* kernel image from _stext up to the page-aligned _end */
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
}
799
/* Initialize storage keys for all present memory and set the PSW key. */
static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}
816
/*
 * Move the AMODE31 section (__samode31.._eamode31) into a freshly
 * allocated low-memory region and patch every recorded reference to it.
 * Order matters: copy, then poison the old location, then fix up refs.
 */
static void __init relocate_amode31_section(void)
{
	unsigned long amode31_addr, amode31_size;
	long amode31_offset;
	long *ptr;

	/* Allocate a new AMODE31 capable memory region */
	amode31_size = __eamode31 - __samode31;
	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
	amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
	if (!amode31_addr)
		panic("Failed to allocate memory for AMODE31 section\n");
	amode31_offset = amode31_addr - __samode31;

	/* Move original AMODE31 section to the new one */
	memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
	/* Zero out the old AMODE31 section to catch invalid accesses within it */
	memset((void *)__samode31, 0, amode31_size);

	/* Update all AMODE31 region references */
	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
		*ptr += amode31_offset;
}
840
/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	/* Point the DUCT entries at the (relocated) ASTE/DUALD tables */
	__ctl_duct[1] = (unsigned long)__ctl_aste;
	__ctl_duct[2] = (unsigned long)__ctl_aste;
	__ctl_duct[4] = (unsigned long)__ctl_duald;

	/* Update control registers CR2, CR5 and CR15 */
	__ctl_store(cr2.val, 2, 2);
	__ctl_store(cr5.val, 5, 5);
	__ctl_store(cr15.val, 15, 15);
	/* shifts match the register field alignment (presumably 64/8 bytes) */
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
	__ctl_load(cr2.val, 2, 2);
	__ctl_load(cr5.val, 5, 5);
	__ctl_load(cr15.val, 15, 15);
}
863
/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	/* one page, page-aligned scratch buffer for the STSI response */
	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
							    PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");

	/* STSI 3.2.2: feed the VM descriptions into the entropy pool */
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}
880
881/*
Martin Schwidefsky3f6813b2016-04-01 15:42:15 +0200882 * Find the correct size for the task_struct. This depends on
883 * the size of the struct fpu at the end of the thread_struct
884 * which is embedded in the task_struct.
885 */
886static void __init setup_task_size(void)
887{
888 int task_size = sizeof(struct task_struct);
889
890 if (!MACHINE_HAS_VX) {
891 task_size -= sizeof(__vector128) * __NUM_VXRS;
892 task_size += sizeof(freg_t) * __NUM_FPRS;
893 }
894 arch_task_struct_size = task_size;
895}
896
/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	/* only issue the diagnose if SCLP reports the facility */
	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}
914
/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	/* nothing to report if no component list was passed in */
	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	/* walk the fixed-size entries up to the list end */
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}
946
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_above_ident_map();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();

	/* setup_cr() must run after relocate_amode31_section() */
	relocate_amode31_section();
	setup_cr();

	setup_uv();
	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(ident_map_size);
	vmcp_cma_reserve();
	if (MACHINE_HAS_EDAT2)
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059}