// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2G, which is handled by the
 * AMODE31 sections.
 * Because the AMODE31 sections are relocated below 2G at startup,
 * the contents of control registers CR2, CR5 and CR15 must be updated
 * with the new addresses after the relocation. The initial setup of
 * the control registers occurs in head64.S, and they are updated again
 * after the AMODE31 relocation. We must access the relevant AMODE31
 * tables indirectly via pointers placed in the .amode31.refs linker
 * section. Those pointers get updated automatically during the AMODE31
 * relocation and always contain a valid address within the AMODE31
 * sections.
 */

static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
	[1] = 0xffffffffffffffff
};

static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0
};

static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
	0, 0, 0x89000000, 0,
	0, 0, 0x8a000000, 0
};

static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
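
/*
 * A minimal sketch of the indirection described above (assuming the
 * __amode31_data and __amode31_ref section attributes come from
 * asm/sections.h):
 *
 *	static __amode31_data u32 table[4];	   placed in .amode31.data
 *	static u32 __amode31_ref *ptr = table;	   pointer in .amode31.refs
 *
 * After relocate_amode31_section() has run, "ptr" points at the
 * relocated copy of "table", while a plain pointer would still
 * reference the old, zeroed-out location.
 */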

int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

DEFINE_STATIC_KEY_FALSE(cpu_has_bear);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through, which is safe.
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);
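
/*
 * Usage sketch: "condev=0x001f" on the kernel command line selects device
 * number 0x001f as console device (the value is parsed with base 0, so a
 * decimal number works as well) and resets console_irq so the subchannel
 * is looked up again from the device number.
 */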

static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);
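
/*
 * Usage sketch: "conmode=sclp" (or its alias "hwc") forces the SCLP
 * console, "conmode=3215" and "conmode=3270" force the respective
 * terminal consoles, overriding what conmode_default() would pick.
 */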

static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (oldmem_data.start)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	void *ret;

	ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
			     NUMA_NO_NODE, __builtin_return_address(0));
	kmemleak_not_leak(ret);
	return (unsigned long)ret;
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}
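
/*
 * Typical usage (a sketch, mirroring arch_call_rest_init() below):
 *
 *	unsigned long stack = stack_alloc();
 *
 *	if (!stack)
 *		panic("Couldn't allocate kernel stack");
 *	S390_lowcore.kernel_stack = stack + STACK_INIT_OFFSET;
 *	...
 *	stack_free(stack);
 */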

int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	call_on_stack_noreturn(rest_init, stack);
}

static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	unsigned long mcck_stack;
	struct lowcore *lc;

	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	nmi_alloc_boot_cpu(lc);
	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;

	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!mcck_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

static void __init setup_lowcore_dat_on(void)
{
	struct lowcore *lc = lowcore_ptr[0];

	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
	__ctl_set_bit(0, 28);
	mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
	mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
	memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
			sizeof(S390_lowcore.cregs_save_area));
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res),
				  0, MEMBLOCK_NONE);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

static void __init setup_memory_end(void)
{
	memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
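/*
 * Usage sketch: "crashkernel=256M" on the kernel command line reserves
 * 256 MB at a kernel-chosen base address, "crashkernel=256M@1G" requests
 * the reservation at a fixed base of 1 GB (see
 * Documentation/admin-guide/kdump/kdump.rst for the full syntax).
 */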
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: oldmem_data.start;
	high = low + crash_size;
	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_phys_alloc_range(crash_size,
						       KEXEC_CRASH_MEM_ALIGN,
						       low, high);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb)) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	if (!oldmem_data.start && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_data.start || !initrd_data.size)
		return;
	initrd_start = (unsigned long)__va(initrd_data.start);
	initrd_end = initrd_start + initrd_data.size;
	memblock_reserve(initrd_data.start, initrd_data.size);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_phys_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	memblock_set_bottom_up(false);
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_data.start && initrd_data.size &&
	    !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_phys_free(initrd_data.start, initrd_data.size);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
	memblock_reserve(__amode31_base, __eamode31 - __samode31);
	memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
	memblock_reserve(__pa(_stext), _end - _stext);
}

static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);
}

static void __init relocate_amode31_section(void)
{
	unsigned long amode31_size = __eamode31 - __samode31;
	long amode31_offset = __amode31_base - __samode31;
	long *ptr;

	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);

	/* Move original AMODE31 section to the new one */
	memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
	/* Zero out the old AMODE31 section to catch invalid accesses within it */
	memset((void *)__samode31, 0, amode31_size);

	/* Update all AMODE31 region references */
	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
		*ptr += amode31_offset;
}

/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	__ctl_duct[1] = (unsigned long)__ctl_aste;
	__ctl_duct[2] = (unsigned long)__ctl_aste;
	__ctl_duct[4] = (unsigned long)__ctl_duald;

	/* Update control registers CR2, CR5 and CR15 */
	__ctl_store(cr2.val, 2, 2);
	__ctl_store(cr5.val, 5, 5);
	__ctl_store(cr15.val, 15, 15);
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
	__ctl_load(cr2.val, 2, 2);
	__ctl_load(cr5.val, 5, 5);
	__ctl_load(cr15.val, 15, 15);
}
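
/*
 * Note on the shifts above, assuming the z/Architecture control register
 * layout: the DUCT and primary-ASTE origins stored in CR2/CR5 are 64-byte
 * aligned addresses kept without their low six bits, hence ">> 6"; the
 * linkage-stack-entry address in CR15 is 8-byte aligned, hence ">> 3".
 */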

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free(vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}
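
/*
 * Worked example, assuming __NUM_VXRS == 32 and __NUM_FPRS == 16: without
 * the vector facility 32 * 16 = 512 bytes of vector register save area are
 * dropped and 16 * 8 = 128 bytes of floating-point registers are added,
 * shrinking each task_struct by 384 bytes.
 */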

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has already been set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_set_current_limit(ident_map_size);
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();
	setup_memory_end();
	memblock_dump_all();
	setup_memory();

	relocate_amode31_section();
	setup_cr();
	setup_uv();
	dma_contiguous_reserve(ident_map_size);
	vmcp_cma_reserve();
	if (MACHINE_HAS_EDAT2)
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	if (test_facility(193))
		static_branch_enable(&cpu_has_bear);

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}